[ns_server:info,2014-08-19T15:37:51.645,nonode@nohost:<0.58.0>:ns_server:init_logging:248]Started & configured logging
[ns_server:info,2014-08-19T15:37:51.648,nonode@nohost:<0.58.0>:ns_server:log_pending:30]Static config terms:
[{error_logger_mf_dir,"/opt/couchbase/var/lib/couchbase/logs"},
{error_logger_mf_maxbytes,10485760},
{error_logger_mf_maxfiles,20},
{path_config_bindir,"/opt/couchbase/bin"},
{path_config_etcdir,"/opt/couchbase/etc/couchbase"},
{path_config_libdir,"/opt/couchbase/lib"},
{path_config_datadir,"/opt/couchbase/var/lib/couchbase"},
{path_config_tmpdir,"/opt/couchbase/var/lib/couchbase/tmp"},
{nodefile,"/opt/couchbase/var/lib/couchbase/couchbase-server.node"},
{loglevel_default,debug},
{loglevel_couchdb,info},
{loglevel_ns_server,debug},
{loglevel_error_logger,debug},
{loglevel_user,debug},
{loglevel_menelaus,debug},
{loglevel_ns_doctor,debug},
{loglevel_stats,debug},
{loglevel_rebalance,debug},
{loglevel_cluster,debug},
{loglevel_views,debug},
{loglevel_mapreduce_errors,debug},
{loglevel_xdcr,debug}]
[ns_server:info,2014-08-19T15:37:51.814,nonode@nohost:<0.58.0>:ns_server:start:58]Locked myself into memory successfully.
[error_logger:info,2014-08-19T15:37:51.869,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,crypto_sup}
started: [{pid,<0.167.0>},
{name,crypto_server},
{mfargs,{crypto_server,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:51.869,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: crypto
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:51.884,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: asn1
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:51.891,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: public_key
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:51.903,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.174.0>},
{name,ftp_sup},
{mfargs,{ftp_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.930,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,httpc_profile_sup}
started: [{pid,<0.177.0>},
{name,httpc_manager},
{mfargs,
{httpc_manager,start_link,
[default,only_session_cookies,inets]}},
{restart_type,permanent},
{shutdown,4000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:51.931,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,httpc_sup}
started: [{pid,<0.176.0>},
{name,httpc_profile_sup},
{mfargs,
{httpc_profile_sup,start_link,
[[{httpc,{default,only_session_cookies}}]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.934,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,httpc_sup}
started: [{pid,<0.178.0>},
{name,httpc_handler_sup},
{mfargs,{httpc_handler_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.934,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.175.0>},
{name,httpc_sup},
{mfargs,
{httpc_sup,start_link,
[[{httpc,{default,only_session_cookies}}]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.938,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.179.0>},
{name,httpd_sup},
{mfargs,{httpd_sup,start_link,[[]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.941,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.180.0>},
{name,tftp_sup},
{mfargs,{tftp_sup,start_link,[[]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.941,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: inets
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:51.941,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: oauth
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:51.953,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.186.0>},
{name,ssl_broker_sup},
{mfargs,{ssl_broker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.959,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.187.0>},
{name,ssl_manager},
{mfargs,{ssl_manager,start_link,[[]]}},
{restart_type,permanent},
{shutdown,4000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:51.961,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.188.0>},
{name,ssl_connection},
{mfargs,{ssl_connection_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,4000},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:51.961,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: ssl
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.113,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.195.0>},
{name,ssl_server},
{mfargs,{ssl_server,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.114,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,lhttpc_sup}
started: [{pid,<0.193.0>},
{name,lhttpc_manager},
{mfargs,
{lhttpc_manager,start_link,
[[{name,lhttpc_manager}]]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.114,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: lhttpc
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.117,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: xmerl
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.127,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: compiler
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.131,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: syntax_tools
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.131,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: mochiweb
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.133,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_view_parser
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.136,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_set_view
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.138,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_index_merger
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.140,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: mapreduce
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:37:52.173,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.204.0>},
{name,couch_config},
{mfargs,
{couch_server_sup,couch_config_start_link_wrapper,
[["/opt/couchbase/etc/couchdb/default.ini",
"/opt/couchbase/etc/couchdb/default.d/capi.ini",
"/opt/couchbase/etc/couchdb/default.d/geocouch.ini",
"/opt/couchbase/etc/couchdb/local.ini"],
<0.204.0>]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.194,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.207.0>},
{name,collation_driver},
{mfargs,{couch_drv,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:52.195,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.208.0>},
{name,couch_task_events},
{mfargs,
{gen_event,start_link,[{local,couch_task_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.197,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.209.0>},
{name,couch_task_status},
{mfargs,{couch_task_status,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.198,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.210.0>},
{name,couch_file_write_guard},
{mfargs,{couch_file_write_guard,sup_start_link,[]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.206,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.211.0>},
{name,couch_server},
{mfargs,{couch_server,sup_start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.207,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.212.0>},
{name,couch_db_update_event},
{mfargs,
{gen_event,start_link,[{local,couch_db_update}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.207,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.213.0>},
{name,couch_replication_event},
{mfargs,
{gen_event,start_link,[{local,couch_replication}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.208,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.214.0>},
{name,couch_replication_supervisor},
{mfargs,{couch_rep_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:52.210,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.215.0>},
{name,couch_log},
{mfargs,{couch_log,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.214,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.216.0>},
{name,couch_main_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_main_index_barrier,
"max_parallel_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.214,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.217.0>},
{name,couch_replica_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_replica_index_barrier,
"max_parallel_replica_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.214,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.218.0>},
{name,couch_spatial_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_spatial_index_barrier,
"max_parallel_spatial_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.214,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.206.0>},
{name,couch_primary_services},
{mfargs,{couch_primary_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:52.218,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.220.0>},
{name,couch_db_update_notifier_sup},
{mfargs,{couch_db_update_notifier_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:52.302,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.221.0>},
{name,auth_cache},
{mfargs,{couch_auth_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.311,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.232.0>},
{name,set_view_manager},
{mfargs,{couch_set_view,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.314,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.235.0>},
{name,spatial_manager},
{mfargs,{couch_spatial,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.314,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.237.0>},
{name,index_merger_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{connection_timeout,90000},
{pool_size,10000},
{name,couch_index_merger_connection_pool}]]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.318,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.238.0>},
{name,query_servers},
{mfargs,{couch_query_servers,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.321,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.240.0>},
{name,couch_set_view_ddoc_cache},
{mfargs,{couch_set_view_ddoc_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.324,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.242.0>},
{name,view_manager},
{mfargs,{couch_view,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.339,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.244.0>},
{name,httpd},
{mfargs,{couch_httpd,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.339,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.261.0>},
{name,uuids},
{mfargs,{couch_uuids,start,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.339,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.219.0>},
{name,couch_secondary_services},
{mfargs,{couch_secondary_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:52.339,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,cb_couch_sup}
started: [{pid,<0.205.0>},
{name,couch_app},
{mfargs,
{couch_app,start,
[fake,
["/opt/couchbase/etc/couchdb/default.ini",
"/opt/couchbase/etc/couchdb/local.ini"]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:52.339,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.162.0>},
{name,cb_couch_sup},
{mfargs,{cb_couch_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,supervisor}]
[ns_server:info,2014-08-19T15:37:52.348,nonode@nohost:ns_server_cluster_sup<0.161.0>:log_os_info:start_link:25]OS type: {unix,linux} Version: {2,6,32}
Runtime info: [{otp_release,"R14B04"},
{erl_version,"5.8.5"},
{erl_version_long,
"Erlang R14B04 (erts-5.8.5) [source] [64-bit] [smp:24:24] [rq:24] [async-threads:16] [kernel-poll:true]\n"},
{system_arch_raw,"x86_64-unknown-linux-gnu"},
{system_arch,"x86_64-unknown-linux-gnu"},
{localtime,{{2014,8,19},{15,37,52}}},
{memory,
[{total,560927296},
{processes,5655168},
{processes_used,5644744},
{system,555272128},
{atom,833185},
{atom_used,825581},
{binary,49584},
{code,7877161},
{ets,648928}]},
{loaded,
[ns_info,log_os_info,couch_config_writer,cb_init_loggers,
mochiweb_acceptor,inet_tcp,gen_tcp,mochiweb_socket,
mochiweb_socket_server,mochilists,mochiweb_http,eval_bits,
couch_httpd,couch_view,couch_set_view_ddoc_cache,
couch_query_servers,couch_spatial,mapreduce,
couch_set_view,snappy,couch_compress,
couch_spatial_validation,couch_set_view_mapreduce,ejson,
couch_doc,couch_db_update_notifier,couch_btree,
couch_ref_counter,couch_uuids,couch_db_updater,couch_db,
couch_auth_cache,couch_db_update_notifier_sup,
couch_secondary_sup,queue,couch_index_barrier,
couch_event_sup,couch_log,couch_rep_sup,httpd_util,
filelib,couch_file,couch_file_write_guard,
couch_task_status,erl_ddll,couch_drv,couch_primary_sup,
couch_server,string,re,file2,couch_util,couch_config,
couch_server_sup,ssl_server,crypto,ssl,lhttpc_manager,
lhttpc_sup,lhttpc,ssl_connection_sup,ssl_session_cache,
ssl_certificate_db,ssl_manager,ssl_broker_sup,ssl_sup,
ssl_app,tftp_sup,httpd_sup,httpc_handler_sup,httpc_cookie,
inets,httpc_manager,httpc,httpc_profile_sup,httpc_sup,
ftp_sup,inets_sup,inets_app,crypto_server,crypto_sup,
crypto_app,couch_app,cb_couch_sup,ns_server_cluster_sup,
mlockall,calendar,ale_default_formatter,otp_internal,misc,
'ale_logger-xdcr','ale_logger-mapreduce_errors',
'ale_logger-views',timer,io_lib_fread,
'ale_logger-cluster','ale_logger-rebalance',
'ale_logger-stats','ale_logger-ns_doctor',
'ale_logger-menelaus','ale_logger-user',
'ale_logger-ns_server','ale_logger-couchdb',ns_log_sink,
disk_log_sup,disk_log_server,disk_log_1,disk_log,
ale_disk_sink,ns_server,cpu_sup,memsup,disksup,os_mon,io,
release_handler,overload,alarm_handler,log_mf_h,sasl,
ale_error_logger_handler,'ale_logger-ale_logger',
'ale_logger-error_logger',beam_opcodes,beam_dict,beam_asm,
beam_validator,beam_flatten,beam_trim,beam_receive,
beam_bsm,beam_peep,beam_dead,beam_type,beam_bool,
beam_clean,beam_utils,beam_jump,beam_block,v3_codegen,
v3_life,v3_kernel,sys_core_dsetel,erl_bifs,sys_core_fold,
cerl_trees,sys_core_inline,core_lib,cerl,v3_core,erl_bits,
erl_expand_records,sys_pre_expand,sofs,erl_internal,sets,
ordsets,erl_lint,compile,dynamic_compile,ale_utils,
io_lib_pretty,io_lib_format,io_lib,ale_codegen,dict,ale,
ale_dynamic_sup,ale_sup,ale_app,ns_bootstrap,child_erlang,
file_io_server,orddict,erl_eval,file,c,kernel_config,
user_sup,supervisor_bridge,standard_error,unicode,binary,
ets,gb_sets,hipe_unified_loader,packages,code_server,code,
file_server,net_kernel,global_group,erl_distribution,
filename,inet_gethost_native,os,inet_parse,inet,inet_udp,
inet_config,inet_db,global,gb_trees,rpc,supervisor,kernel,
application_master,sys,application,gen_server,erl_parse,
proplists,erl_scan,lists,application_controller,proc_lib,
gen,gen_event,error_logger,heart,error_handler,erlang,
erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,init,
otp_ring0]},
{applications,
[{public_key,"Public key infrastructure","0.13"},
{asn1,"The Erlang ASN1 compiler version 1.6.18","1.6.18"},
{lhttpc,"Lightweight HTTP Client","1.3.0"},
{ale,"Another Logger for Erlang","8ca6d2a"},
{os_mon,"CPO CXC 138 46","2.2.7"},
{couch_set_view,"Set views","1.2.0a-a425d97-git"},
{compiler,"ERTS CXC 138 10","4.7.5"},
{inets,"INETS CXC 138 49","5.7.1"},
{couch,"Apache CouchDB","1.2.0a-a425d97-git"},
{mapreduce,"MapReduce using V8 JavaScript engine","1.0.0"},
{couch_index_merger,"Index merger","1.2.0a-a425d97-git"},
{kernel,"ERTS CXC 138 10","2.14.5"},
{crypto,"CRYPTO version 2","2.0.4"},
{ssl,"Erlang/OTP SSL application","4.1.6"},
{sasl,"SASL CXC 138 11","2.1.10"},
{couch_view_parser,"Couch view parser","1.0.0"},
{ns_server,"Couchbase server","2.5.1-1083-rel-enterprise"},
{mochiweb,"MochiMedia Web Server","2.4.2"},
{syntax_tools,"Syntax tools","1.6.7.1"},
{xmerl,"XML parser","1.2.10"},
{oauth,"Erlang OAuth implementation","7d85d3ef"},
{stdlib,"ERTS CXC 138 10","1.17.5"}]},
{pre_loaded,
[erlang,erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,
init,otp_ring0]},
{process_count,147},
{node,nonode@nohost},
{nodes,[]},
{registered,
[kernel_safe_sup,couch_db_update_notifier_sup,
couch_auth_cache,couch_rep_sup,os_mon_sup,couch_view,
cpu_sup,couch_server_sup,memsup,disksup,
couch_query_servers,ns_server_cluster_sup,
couch_task_status,couch_log,httpd_sup,couch_httpd,
couch_drv,ssl_connection_sup,couch_file_write_guard,
couch_set_view_ddoc_cache,cb_couch_sup,ssl_manager,
error_logger,couch_index_merger_connection_pool,
sasl_safe_sup,'sink-ns_log','sink-disk_stats',ale_sup,
couch_spatial,standard_error,'sink-disk_xdcr_errors',
'sink-disk_xdcr','sink-disk_debug',standard_error_sup,
ale_dynamic_sup,'sink-disk_couchdb',
'sink-disk_mapreduce_errors','sink-disk_views',
ssl_broker_sup,'sink-disk_error',ssl_server,timer_server,
ssl_sup,ale,httpc_sup,httpc_profile_sup,httpc_manager,
httpc_handler_sup,erl_prim_loader,inet_db,ftp_sup,
sasl_sup,couch_spatial_index_barrier,rex,
couch_replica_index_barrier,kernel_sup,
couch_main_index_barrier,global_name_server,inets_sup,
lhttpc_sup,couch_replication,crypto_server,file_server_2,
crypto_sup,global_group,couch_task_events,
couch_secondary_services,couch_primary_services,
release_handler,couch_db_update,init,overload,
couch_config,alarm_handler,couch_set_view,disk_log_sup,
disk_log_server,couch_server,code_server,couch_uuids,
application_controller,lhttpc_manager,tftp_sup,
'sink-disk_default']},
{cookie,nocookie},
{wordsize,8},
{wall_clock,1}]
[ns_server:info,2014-08-19T15:37:52.354,nonode@nohost:ns_server_cluster_sup<0.161.0>:log_os_info:start_link:27]Manifest:
["","",
" ",
" ",
" ",
" ",
" ",
" ",
" "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "," "]
[error_logger:info,2014-08-19T15:37:52.355,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.263.0>},
{name,timeout_diag_logger},
{mfargs,{timeout_diag_logger,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:52.357,nonode@nohost:dist_manager<0.264.0>:dist_manager:read_address_config_from_path:83]Reading ip config from "/opt/couchbase/var/lib/couchbase/ip_start"
[ns_server:info,2014-08-19T15:37:52.357,nonode@nohost:dist_manager<0.264.0>:dist_manager:read_address_config_from_path:83]Reading ip config from "/opt/couchbase/var/lib/couchbase/ip"
[ns_server:info,2014-08-19T15:37:52.357,nonode@nohost:dist_manager<0.264.0>:dist_manager:init:159]ip config not found. Looks like we're a brand new node
[error_logger:info,2014-08-19T15:37:52.358,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inet_gethost_native_sup}
started: [{pid,<0.266.0>},{mfa,{inet_gethost_native,init,[[]]}}]
[error_logger:info,2014-08-19T15:37:52.358,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.265.0>},
{name,inet_gethost_native_sup},
{mfargs,{inet_gethost_native,start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:52.488,nonode@nohost:dist_manager<0.264.0>:dist_manager:bringup:230]Attempting to bring up net_kernel with name 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:37:52.491,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.268.0>},
{name,erl_epmd},
{mfargs,{erl_epmd,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.491,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.269.0>},
{name,auth},
{mfargs,{auth,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:52.492,ns_1@127.0.0.1:dist_manager<0.264.0>:dist_manager:save_node:143]saving node to "/opt/couchbase/var/lib/couchbase/couchbase-server.node"
[error_logger:info,2014-08-19T15:37:52.492,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.270.0>},
{name,net_kernel},
{mfargs,
{net_kernel,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.492,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_sup}
started: [{pid,<0.267.0>},
{name,net_sup_dynamic},
{mfargs,
{erl_distribution,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T15:37:52.518,ns_1@127.0.0.1:dist_manager<0.264.0>:dist_manager:bringup:238]Attempted to save node name to disk: ok
[error_logger:info,2014-08-19T15:37:52.518,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.264.0>},
{name,dist_manager},
{mfargs,{dist_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.520,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.273.0>},
{name,ns_cookie_manager},
{mfargs,{ns_cookie_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.523,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.274.0>},
{name,ns_cluster},
{mfargs,{ns_cluster,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:52.524,ns_1@127.0.0.1:ns_config_sup<0.275.0>:ns_config_sup:init:32]loading static ns_config from "/opt/couchbase/etc/couchbase/config"
[error_logger:info,2014-08-19T15:37:52.524,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.276.0>},
{name,ns_config_events},
{mfargs,
{gen_event,start_link,[{local,ns_config_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.524,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.277.0>},
{name,ns_config_events_local},
{mfargs,
{gen_event,start_link,
[{local,ns_config_events_local}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:52.538,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:load_config:795]Loading static config from "/opt/couchbase/etc/couchbase/config"
[ns_server:info,2014-08-19T15:37:52.538,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:load_config:809]Loading dynamic config from "/opt/couchbase/var/lib/couchbase/config/config.dat"
[ns_server:info,2014-08-19T15:37:52.538,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:load_config:813]No dynamic config file found. Assuming we're a brand new node
[ns_server:debug,2014-08-19T15:37:52.541,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:load_config:816]Here's the full dynamic config we loaded:
[]
[ns_server:info,2014-08-19T15:37:52.541,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:load_config:827]Here's the full dynamic config we loaded + static & default config:
[{replication_topology,star},
{drop_request_memory_threshold_mib,undefined},
{{request_limit,capi},undefined},
{{request_limit,rest},undefined},
{auto_failover_cfg,[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{replication,[{enabled,true}]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B","binary",
"-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,stream]}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{buckets,[{configs,[]}]},
{memory_quota,58026},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{port,11210},
{mccouch_port,11213},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{remote_clusters,[]},
{rest_creds,[{creds,[]}]},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{node,'ns_1@127.0.0.1',ssl_rest_port},18091},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{couchdb,max_parallel_replica_indexers},2},
{{couchdb,max_parallel_indexers},4},
{rest,[{port,8091}]},
{{node,'ns_1@127.0.0.1',membership},active},
{nodes_wanted,['ns_1@127.0.0.1']},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{max_bucket_count,10},
{index_aware_rebalance_disabled,false}]
[ns_server:info,2014-08-19T15:37:52.542,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config_default:upgrade_config_from_1_7_to_1_7_1:342]Upgrading config from 1.7 to 1.7.1
[ns_server:debug,2014-08-19T15:37:52.543,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,{node,'ns_1@127.0.0.1',config_version},{1,7,1}},
{set,email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,
auto_failover_cluster_too_small,ip,disk,overhead,
ep_oom_errors,ep_item_commit_failed]}]},
{set,auto_failover_cfg,
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]}]
[ns_server:info,2014-08-19T15:37:52.543,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config_default:upgrade_config_from_1_7_1_to_1_7_2:353]Upgrading config from 1.7.1 to 1.7.2
[ns_server:debug,2014-08-19T15:37:52.544,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,{node,'ns_1@127.0.0.1',config_version},{1,7,2}}]
[ns_server:info,2014-08-19T15:37:52.544,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config_default:upgrade_config_from_1_7_2_to_1_8_0:407]Upgrading config from 1.7.2 to 1.8.0
[ns_server:debug,2014-08-19T15:37:52.546,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,{node,'ns_1@127.0.0.1',config_version},{1,8,0}},
{set,{node,'ns_1@127.0.0.1',port_servers},
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",
{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",
{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,
stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so",
"-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,
port_server_send_eol,stream]}]}]
[ns_server:info,2014-08-19T15:37:52.546,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config_default:upgrade_config_from_1_8_0_to_1_8_1:444]Upgrading config from 1.8.0 to 1.8.1
[ns_server:debug,2014-08-19T15:37:52.547,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,{node,'ns_1@127.0.0.1',config_version},{1,8,1}},
{set,
{node,'ns_1@127.0.0.1',memcached},
[{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{port,11210},
{mccouch_port,11213},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{verbosity,[]}]},
{set,
{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"6ac3587b37a4af2a2855762c74f815ea">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{set,
{node,'ns_1@127.0.0.1',port_servers},
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,
stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B","binary",
"-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]},
{set,
{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"6ac3587b37a4af2a2855762c74f815ea">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]}]
[ns_server:info,2014-08-19T15:37:52.548,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config_default:upgrade_config_from_1_8_1_to_2_0:473]Upgrading config from 1.8.1 to 2.0
[ns_server:debug,2014-08-19T15:37:52.549,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,{node,'ns_1@127.0.0.1',config_version},{2,0}},
{set,
{node,'ns_1@127.0.0.1',memcached},
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]},
{set,
{node,'ns_1@127.0.0.1',port_servers},
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,
stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B","binary",
"-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]}]
[ns_server:info,2014-08-19T15:37:52.550,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config_default:upgrade_config_from_2_0_to_2_2_0:542]Upgrading config from 2.0 to 2.2.0
[ns_server:debug,2014-08-19T15:37:52.551,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,{node,'ns_1@127.0.0.1',config_version},{2,2,0}}]
[ns_server:info,2014-08-19T15:37:52.551,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config_default:upgrade_config_from_2_2_0_to_2_3_0:549]Upgrading config from 2.2.0 to 2.3.0
[ns_server:debug,2014-08-19T15:37:52.552,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,{node,'ns_1@127.0.0.1',config_version},{2,3,0}},
{set,
{node,'ns_1@127.0.0.1',memcached},
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]}]
[ns_server:debug,2014-08-19T15:37:52.553,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_init:626]Upgraded initial config:
{config,
{full,"/opt/couchbase/etc/couchbase/config",undefined,ns_config_default},
[[],
[{directory,"/opt/couchbase/var/lib/couchbase/config"},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{nodes_wanted,['ns_1@127.0.0.1']},
{{node,'ns_1@127.0.0.1',membership},active},
{rest,[{port,8091}]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{node,'ns_1@127.0.0.1',ssl_rest_port},18091},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{rest_creds,[{creds,[]}]},
{remote_clusters,[]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{port,11210},
{mccouch_port,11213},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{verbosity,[]}]},
{memory_quota,58026},
{buckets,[{configs,[]}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",
{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,
stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,
auto_failover_cluster_too_small,ip,disk,overhead,
ep_oom_errors,ep_item_commit_failed]}]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{replication,[{enabled,true}]},
{auto_failover_cfg,
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{{request_limit,rest},undefined},
{{request_limit,capi},undefined},
{drop_request_memory_threshold_mib,undefined},
{replication_topology,star}]],
[[{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{'ns_1@127.0.0.1',{7,63575667472}}]}|{2,3,0}]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{auto_failover_cfg,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{enabled,false},
{timeout,120},
{max_nodes,1},
{count,0}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{buckets,[{configs,[]}]},
{drop_request_memory_threshold_mib,undefined},
{email_alerts,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,
auto_failover_cluster_too_small,ip,disk,overhead,
ep_oom_errors,ep_item_commit_failed]}]},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{memory_quota,58026},
{nodes_wanted,['ns_1@127.0.0.1']},
{remote_clusters,[]},
{replication,[{enabled,true}]},
{replication_topology,star},
{rest,[{port,8091}]},
{rest_creds,[{creds,[]}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{request_limit,capi},undefined},
{{request_limit,rest},undefined},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',membership},active},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',
[{'ns_1@127.0.0.1',{3,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{'ns_1@127.0.0.1',{3,63575667472}}]},
{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",
{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,
stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{{node,'ns_1@127.0.0.1',ssl_rest_port},18091}]],
ns_config_default,
{ns_config,save_config_sync,[]},
undefined,false}
[error_logger:info,2014-08-19T15:37:52.555,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.278.0>},
{name,ns_config},
{mfargs,
{ns_config,start_link,
["/opt/couchbase/etc/couchbase/config",
ns_config_default]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.556,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.281.0>},
{name,ns_config_remote},
{mfargs,
{ns_config_replica,start_link,
[{local,ns_config_remote}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.557,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.282.0>},
{name,ns_config_log},
{mfargs,{ns_config_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.586,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.284.0>},
{name,cb_config_couch_sync},
{mfargs,{cb_config_couch_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.586,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.275.0>},
{name,ns_config_sup},
{mfargs,{ns_config_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:52.587,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.286.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.592,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.288.0>},
{name,diag_handler_worker},
{mfa,{work_queue,start_link,[diag_handler_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:52.593,ns_1@127.0.0.1:ns_server_sup<0.287.0>:dir_size:start_link:47]Starting quick version of dir_size with program name: i386-linux-godu
[error_logger:info,2014-08-19T15:37:52.594,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.289.0>},
{name,dir_size},
{mfa,{dir_size,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.595,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.290.0>},
{name,request_throttler},
{mfa,{request_throttler,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.597,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.292.0>},
{name,timer2_server},
{mfargs,{timer2,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:warn,2014-08-19T15:37:52.598,ns_1@127.0.0.1:ns_log<0.291.0>:ns_log:read_logs:123]Couldn't load logs from "/opt/couchbase/var/lib/couchbase/ns_log" (perhaps it's first startup): {error,
enoent}
[error_logger:info,2014-08-19T15:37:52.598,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.291.0>},
{name,ns_log},
{mfa,{ns_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.598,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.293.0>},
{name,ns_crash_log_consumer},
{mfa,{ns_log,start_link_crash_consumer,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:52.599,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.294.0>},
{name,ns_config_ets_dup},
{mfa,{ns_config_ets_dup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:52.599,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T15:37:52.600,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T15:37:52.600,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T15:37:52.601,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T15:37:52.601,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[]}]
[ns_server:debug,2014-08-19T15:37:52.602,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[ns_server:debug,2014-08-19T15:37:52.602,ns_1@127.0.0.1:ns_config_isasl_sync<0.297.0>:ns_config_isasl_sync:init:63]isasl_sync init: ["/opt/couchbase/var/lib/couchbase/isasl.pw","_admin",
"f6126ae5fac44bf3d8316165791747f2"]
[ns_server:debug,2014-08-19T15:37:52.602,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[ns_server:debug,2014-08-19T15:37:52.602,ns_1@127.0.0.1:ns_config_isasl_sync<0.297.0>:ns_config_isasl_sync:init:71]isasl_sync init buckets: []
[ns_server:debug,2014-08-19T15:37:52.602,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[ns_server:debug,2014-08-19T15:37:52.602,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T15:37:52.603,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[ns_server:debug,2014-08-19T15:37:52.603,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
memory_quota ->
58026
[ns_server:debug,2014-08-19T15:37:52.603,ns_1@127.0.0.1:ns_config_isasl_sync<0.297.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/isasl.pw"
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:info,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T15:37:52.604,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T15:37:52.605,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2014-08-19T15:37:52.605,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
8092
[ns_server:debug,2014-08-19T15:37:52.605,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T15:37:52.605,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T15:37:52.608,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',membership} ->
active
[ns_server:debug,2014-08-19T15:37:52.609,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T15:37:52.609,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:warn,2014-08-19T15:37:52.609,ns_1@127.0.0.1:ns_config_isasl_sync<0.297.0>:ns_memcached:connect:1161]Unable to connect: {error,{badmatch,{error,econnrefused}}}, retrying.
[ns_server:debug,2014-08-19T15:37:52.609,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T15:37:52.609,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T15:37:52.610,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T15:37:52.610,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T15:37:52.610,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T15:37:52.610,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T15:37:52.610,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
18091
[error_logger:info,2014-08-19T15:37:53.610,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.297.0>},
{name,ns_config_isasl_sync},
{mfa,{ns_config_isasl_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:53.610,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.301.0>},
{name,ns_log_events},
{mfa,{gen_event,start_link,[{local,ns_log_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:53.611,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.303.0>},
{name,ns_node_disco_events},
{mfargs,
{gen_event,start_link,
[{local,ns_node_disco_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:53.612,ns_1@127.0.0.1:ns_node_disco<0.304.0>:ns_node_disco:init:103]Initting ns_node_disco with []
[ns_server:debug,2014-08-19T15:37:53.612,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[user:info,2014-08-19T15:37:53.612,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_init:86]Initial otp cookie generated: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T15:37:53.612,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,alkbqedpsntmtnxa}]
[ns_server:debug,2014-08-19T15:37:53.612,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T15:37:53.644,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T15:37:53.644,ns_1@127.0.0.1:<0.305.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T15:37:53.645,ns_1@127.0.0.1:<0.305.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T15:37:53.645,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.304.0>},
{name,ns_node_disco},
{mfargs,{ns_node_disco,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:53.646,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.308.0>},
{name,ns_node_disco_log},
{mfargs,{ns_node_disco_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:53.647,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.309.0>},
{name,ns_node_disco_conf_events},
{mfargs,{ns_node_disco_conf_events,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:53.648,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:init:66]init pulling
[error_logger:info,2014-08-19T15:37:53.648,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.310.0>},
{name,ns_config_rep_merger},
{mfargs,{ns_config_rep,start_link_merger,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:53.648,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:init:68]init pushing
[ns_server:debug,2014-08-19T15:37:53.649,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:init:72]init reannouncing
[ns_server:debug,2014-08-19T15:37:53.649,ns_1@127.0.0.1:ns_config_events<0.276.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[ns_server:debug,2014-08-19T15:37:53.649,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T15:37:53.649,ns_1@127.0.0.1:ns_config_events<0.276.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2014-08-19T15:37:53.649,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,alkbqedpsntmtnxa}]
[ns_server:debug,2014-08-19T15:37:53.650,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T15:37:53.650,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T15:37:53.650,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T15:37:53.650,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T15:37:53.650,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T15:37:53.650,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[]}]
[ns_server:debug,2014-08-19T15:37:53.650,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[error_logger:info,2014-08-19T15:37:53.651,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.311.0>},
{name,ns_config_rep},
{mfargs,{ns_config_rep,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:53.651,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.302.0>},
{name,ns_node_disco_sup},
{mfa,{ns_node_disco_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T15:37:53.651,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[ns_server:debug,2014-08-19T15:37:53.651,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([alert_limits,auto_failover_cfg,autocompaction,
buckets,drop_request_memory_threshold_mib,
email_alerts,fast_warmup,
index_aware_rebalance_disabled,
max_bucket_count,memory_quota,nodes_wanted,otp,
remote_clusters,replication,
replication_topology,rest,rest_creds,
set_view_update_daemon,
{couchdb,max_parallel_indexers},
{couchdb,max_parallel_replica_indexers},
{request_limit,capi},
{request_limit,rest},
{node,'ns_1@127.0.0.1',capi_port},
{node,'ns_1@127.0.0.1',compaction_daemon},
{node,'ns_1@127.0.0.1',config_version},
{node,'ns_1@127.0.0.1',isasl},
{node,'ns_1@127.0.0.1',membership},
{node,'ns_1@127.0.0.1',memcached},
{node,'ns_1@127.0.0.1',moxi},
{node,'ns_1@127.0.0.1',ns_log},
{node,'ns_1@127.0.0.1',port_servers},
{node,'ns_1@127.0.0.1',rest},
{node,'ns_1@127.0.0.1',ssl_capi_port},
{node,'ns_1@127.0.0.1',
ssl_proxy_downstream_port},
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
{node,'ns_1@127.0.0.1',ssl_rest_port}]..)
[ns_server:debug,2014-08-19T15:37:53.651,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[ns_server:debug,2014-08-19T15:37:53.651,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T15:37:53.651,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[ns_server:debug,2014-08-19T15:37:53.651,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
memory_quota ->
58026
[ns_server:debug,2014-08-19T15:37:53.651,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2014-08-19T15:37:53.652,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T15:37:53.652,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[error_logger:info,2014-08-19T15:37:53.652,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.317.0>},
{name,vbucket_map_mirror},
{mfa,{vbucket_map_mirror,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:info,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
8092
[ns_server:debug,2014-08-19T15:37:53.653,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T15:37:53.654,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T15:37:53.654,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',membership} ->
active
[ns_server:debug,2014-08-19T15:37:53.654,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T15:37:53.654,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T15:37:53.654,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T15:37:53.655,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T15:37:53.655,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T15:37:53.655,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T15:37:53.655,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T15:37:53.655,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T15:37:53.655,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T15:37:53.837,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[error_logger:info,2014-08-19T15:37:53.837,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.319.0>},
{name,bucket_info_cache},
{mfa,{bucket_info_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:53.837,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[error_logger:info,2014-08-19T15:37:53.837,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.322.0>},
{name,ns_tick_event},
{mfa,{gen_event,start_link,[{local,ns_tick_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:53.837,ns_1@127.0.0.1:<0.314.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T15:37:53.837,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T15:37:53.837,ns_1@127.0.0.1:<0.314.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T15:37:53.837,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.323.0>},
{name,buckets_events},
{mfa,{gen_event,start_link,[{local,buckets_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:53.839,ns_1@127.0.0.1:ns_log_events<0.301.0>:ns_mail_log:init:44]ns_mail_log started up
[error_logger:info,2014-08-19T15:37:53.839,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_mail_sup}
started: [{pid,<0.325.0>},
{name,ns_mail_log},
{mfargs,{ns_mail_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:53.840,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.324.0>},
{name,ns_mail_sup},
{mfa,{ns_mail_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:53.840,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.326.0>},
{name,ns_stats_event},
{mfa,{gen_event,start_link,[{local,ns_stats_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:54.683,ns_1@127.0.0.1:ns_cookie_manager<0.273.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T15:37:54.684,ns_1@127.0.0.1:<0.315.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T15:37:54.684,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.327.0>},
{name,samples_loader_tasks},
{mfa,{samples_loader_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:54.684,ns_1@127.0.0.1:<0.315.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T15:37:54.686,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.328.0>},
{name,ns_heart},
{mfa,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:54.687,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.330.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@127.0.0.1'},
{latest,"minute"}]}}}
[error_logger:info,2014-08-19T15:37:54.690,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.332.0>},
{name,ns_doctor},
{mfa,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:54.696,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.335.0>},
{name,remote_clusters_info},
{mfa,{remote_clusters_info,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:54.697,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.336.0>},
{name,master_activity_events},
{mfa,
{gen_event,start_link,
[{local,master_activity_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:54.697,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.330.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1},
{proc_lib,init_p_do_apply,3}]}
[ns_server:debug,2014-08-19T15:37:54.699,ns_1@127.0.0.1:ns_server_sup<0.287.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: []
[ns_server:debug,2014-08-19T15:37:54.699,ns_1@127.0.0.1:ns_server_sup<0.287.0>:mb_master:check_master_takeover_needed:143]Got replies: []
[ns_server:debug,2014-08-19T15:37:54.700,ns_1@127.0.0.1:ns_server_sup<0.287.0>:mb_master:check_master_takeover_needed:149]Was unable to discover master, not going to force mastership takeover
[user:info,2014-08-19T15:37:54.702,ns_1@127.0.0.1:mb_master<0.339.0>:mb_master:init:86]I'm the only node, so I'm the master.
[ns_server:debug,2014-08-19T15:37:54.708,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.330.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@127.0.0.1'},
{latest,"minute"}]}}}
[ns_server:debug,2014-08-19T15:37:54.709,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.330.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1}]}
[ns_server:debug,2014-08-19T15:37:54.714,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
undefined
[ns_server:info,2014-08-19T15:37:54.714,ns_1@127.0.0.1:ns_config<0.278.0>:ns_online_config_upgrader:upgrade_config_on_join_from_pre_2_0_to_2_0:70]Adding some 2.0 specific keys to the config
[ns_server:debug,2014-08-19T15:37:54.714,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,dynamic_config_version,[2,0]},{set,vbucket_map_history,[]}]
[ns_server:debug,2014-08-19T15:37:54.715,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([dynamic_config_version]..)
[ns_server:debug,2014-08-19T15:37:54.717,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
[2,5]
[ns_server:debug,2014-08-19T15:37:54.717,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([cluster_compat_version]..)
[ns_server:debug,2014-08-19T15:37:54.717,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@127.0.0.1'
[ns_server:debug,2014-08-19T15:37:54.717,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:handle_call:119]Fully synchronized config in 10 us
[user:warn,2014-08-19T15:37:54.717,ns_1@127.0.0.1:<0.346.0>:ns_orchestrator:consider_switching_compat_mode:1051]Changed cluster compat mode from undefined to [2,5]
[ns_server:info,2014-08-19T15:37:54.717,ns_1@127.0.0.1:ns_config<0.278.0>:ns_online_config_upgrader:upgrade_config_from_pre_2_0_to_2_0:74]Performing online config upgrade to 2.0 version
[ns_server:debug,2014-08-19T15:37:54.718,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
undefined
[ns_server:debug,2014-08-19T15:37:54.718,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,dynamic_config_version,[2,0]}]
[ns_server:info,2014-08-19T15:37:54.719,ns_1@127.0.0.1:ns_config<0.278.0>:ns_online_config_upgrader:upgrade_config_from_2_0_to_2_5:78]Performing online config upgrade to 2.5 version
[ns_server:debug,2014-08-19T15:37:54.719,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:do_upgrade_config:577]Upgrading config by changes:
[{set,dynamic_config_version,[2,5]},
{set,server_groups,
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]}]
[ns_server:debug,2014-08-19T15:37:54.719,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([dynamic_config_version]..)
[ns_server:debug,2014-08-19T15:37:54.719,ns_1@127.0.0.1:mb_master_sup<0.341.0>:misc:start_singleton:986]start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.346.0> on 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:37:54.719,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.346.0>},
{name,ns_orchestrator},
{mfargs,{ns_orchestrator,start_link,[]}},
{restart_type,permanent},
{shutdown,20},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:54.721,ns_1@127.0.0.1:mb_master_sup<0.341.0>:misc:start_singleton:986]start_singleton(gen_server, ns_tick, [], []): started as <0.354.0> on 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:37:54.721,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.354.0>},
{name,ns_tick},
{mfargs,{ns_tick,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:54.743,ns_1@127.0.0.1:<0.355.0>:auto_failover:init:134]init auto_failover.
[ns_server:debug,2014-08-19T15:37:54.743,ns_1@127.0.0.1:mb_master_sup<0.341.0>:misc:start_singleton:986]start_singleton(gen_server, auto_failover, [], []): started as <0.355.0> on 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:37:54.743,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.355.0>},
{name,auto_failover},
{mfargs,{auto_failover,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:54.743,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.339.0>},
{name,mb_master},
{mfa,{mb_master,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:54.744,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.357.0>},
{name,master_activity_events_ingress},
{mfa,
{gen_event,start_link,
[{local,master_activity_events_ingress}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:54.744,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.358.0>},
{name,master_activity_events_timestamper},
{mfa,
{master_activity_events,start_link_timestamper,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:54.744,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.359.0>},
{name,master_activity_events_pids_watcher},
{mfa,
{master_activity_events_pids_watcher,start_link,
[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:54.781,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.360.0>},
{name,master_activity_events_keeper},
{mfa,{master_activity_events_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:58.407,ns_1@127.0.0.1:ns_ssl_services_setup<0.364.0>:ns_server_cert:generate_cert_and_pkey:44]Generated certificate and private key in 3621484 us
[ns_server:debug,2014-08-19T15:37:58.408,ns_1@127.0.0.1:ns_config_rep<0.311.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([cert_and_pkey]..)
[ns_server:debug,2014-08-19T15:37:58.408,ns_1@127.0.0.1:ns_config_log<0.282.0>:ns_config_log:log_common:138]config change:
cert_and_pkey ->
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3Qs"...>>,
<<"*****">>}
[error_logger:info,2014-08-19T15:37:58.434,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.364.0>},
{name,ns_ssl_services_setup},
{mfargs,{ns_ssl_services_setup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:58.436,ns_1@127.0.0.1:ns_ssl_services_setup<0.364.0>:ns_ssl_services_setup:restart_xdcr_proxy:201]Xdcr proxy restart failed. But that's usually normal. {'EXIT',
{{badmatch,
{badrpc,
{'EXIT',
{{case_clause,
false},
[{ns_child_ports_sup,
restart_port_by_name,
1},
{rpc,
'-handle_call_call/6-fun-0-',
5}]}}}},
[{ns_ports_setup,
restart_xdcr_proxy,
0},
{ns_ssl_services_setup,
restart_xdcr_proxy,
0},
{ns_ssl_services_setup,
init,1},
{gen_server,init_it,
6},
{proc_lib,
init_p_do_apply,
3}]}}
[error_logger:info,2014-08-19T15:37:58.461,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.372.0>},
{name,ns_rest_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_rest_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.463,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.389.0>},
{name,ns_capi_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_capi_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.463,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.363.0>},
{name,ns_ssl_services_sup},
{mfargs,{ns_ssl_services_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:58.465,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.406.0>},
{name,menelaus_ui_auth},
{mfargs,{menelaus_ui_auth,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.466,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.407.0>},
{name,menelaus_web_cache},
{mfargs,{menelaus_web_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.467,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.408.0>},
{name,menelaus_stats_gatherer},
{mfargs,{menelaus_stats_gatherer,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.468,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.409.0>},
{name,menelaus_web},
{mfargs,{menelaus_web,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.469,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.426.0>},
{name,menelaus_event},
{mfargs,{menelaus_event,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.470,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.427.0>},
{name,hot_keys_keeper},
{mfargs,{hot_keys_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.473,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.428.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[user:info,2014-08-19T15:37:58.473,ns_1@127.0.0.1:ns_server_sup<0.287.0>:menelaus_sup:start_link:44]Couchbase Server has started on web port 8091 on node 'ns_1@127.0.0.1'.
[error_logger:info,2014-08-19T15:37:58.473,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.362.0>},
{name,menelaus},
{mfa,{menelaus_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:58.474,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.430.0>},
{name,mc_couch_events},
{mfargs,
{gen_event,start_link,[{local,mc_couch_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.475,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.431.0>},
{name,mc_conn_sup},
{mfargs,{mc_conn_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,supervisor}]
[ns_server:info,2014-08-19T15:37:58.476,ns_1@127.0.0.1:<0.432.0>:mc_tcp_listener:init:24]mccouch is listening on port 11213
[error_logger:info,2014-08-19T15:37:58.476,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.432.0>},
{name,mc_tcp_listener},
{mfargs,{mc_tcp_listener,start_link,[11213]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.476,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.429.0>},
{name,mc_sup},
{mfa,{mc_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:58.476,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.433.0>},
{name,ns_ports_setup},
{mfa,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.476,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.434.0>},
{name,ns_port_memcached_killer},
{mfa,{ns_ports_setup,start_memcached_force_killer,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:58.477,ns_1@127.0.0.1:<0.436.0>:ns_memcached_log_rotator:init:28]Starting log rotator on "/opt/couchbase/var/lib/couchbase/logs"/"memcached.log"* with an initial period of 39003ms
[error_logger:info,2014-08-19T15:37:58.477,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.436.0>},
{name,ns_memcached_log_rotator},
{mfa,{ns_memcached_log_rotator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.481,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.438.0>},
{name,memcached_clients_pool},
{mfa,{memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.482,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.439.0>},
{name,proxied_memcached_clients_pool},
{mfa,{proxied_memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.483,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.440.0>},
{name,xdc_lhttpc_pool},
{mfa,
{lhttpc_manager,start_link,
[[{name,xdc_lhttpc_pool},
{connection_timeout,120000},
{pool_size,200}]]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.483,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.441.0>},
{name,ns_null_connection_pool},
{mfa,
{ns_null_connection_pool,start_link,
[ns_null_connection_pool]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.483,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.442.0>},
{name,xdc_replication_sup},
{mfa,{xdc_replication_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:58.510,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.443.0>},
{name,xdc_rep_manager},
{mfa,{xdc_rep_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,30000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.511,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.452.0>},
{name,ns_memcached_sockets_pool},
{mfa,{ns_memcached_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.516,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.455.0>},
{name,ns_bucket_worker},
{mfargs,{work_queue,start_link,[ns_bucket_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.517,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.457.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.517,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.456.0>},
{name,ns_bucket_sup},
{mfargs,{ns_bucket_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:58.517,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.454.0>},
{name,ns_bucket_worker_sup},
{mfa,{ns_bucket_worker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:58.518,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.458.0>},
{name,system_stats_collector},
{mfa,{system_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.519,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.461.0>},
{name,{stats_archiver,"@system"}},
{mfa,{stats_archiver,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.519,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.463.0>},
{name,{stats_reader,"@system"}},
{mfa,{stats_reader,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:58.522,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[error_logger:info,2014-08-19T15:37:58.522,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.464.0>},
{name,compaction_daemon},
{mfa,{compaction_daemon,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,86400000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:37:58.523,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:37:58.524,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.466.0>:xdc_rdoc_replication_srv:init:76]Loaded the following docs:
[]
[ns_server:debug,2014-08-19T15:37:58.524,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.466.0>:xdc_rdoc_replication_srv:handle_info:154]doing replicate_newnodes_docs
[error_logger:info,2014-08-19T15:37:58.524,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.466.0>},
{name,xdc_rdoc_replication_srv},
{mfa,{xdc_rdoc_replication_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:37:58.525,ns_1@127.0.0.1:set_view_update_daemon<0.468.0>:set_view_update_daemon:init:50]Set view update daemon, starting with the following settings:
update interval: 5000ms
minimum number of changes: 5000
[ns_server:debug,2014-08-19T15:37:58.525,ns_1@127.0.0.1:<0.2.0>:child_erlang:child_loop:104]Entered child_loop
[error_logger:info,2014-08-19T15:37:58.525,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.468.0>},
{name,set_view_update_daemon},
{mfa,{set_view_update_daemon,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:37:58.525,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.287.0>},
{name,ns_server_sup},
{mfargs,{ns_server_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:37:58.526,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: ns_server
started_at: 'ns_1@127.0.0.1'
[ns_server:debug,2014-08-19T15:37:59.687,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.330.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{error,no_samples}
[ns_server:debug,2014-08-19T15:38:28.524,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:38:28.524,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:38:58.525,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:38:58.525,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:39:28.526,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:39:28.526,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:39:58.527,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:39:58.527,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:40:28.528,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:40:28.528,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:40:58.529,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:40:58.529,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:41:28.530,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:41:28.530,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:41:58.531,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:41:58.531,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:42:28.532,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:42:28.532,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:42:58.533,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:42:58.533,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:43:28.534,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:43:28.534,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:43:58.535,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:43:58.535,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:44:28.536,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:44:28.536,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:44:58.537,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:44:58.537,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:45:28.538,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:45:28.538,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:45:58.539,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:45:58.540,ns_1@127.0.0.1:compaction_daemon<0.464.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[user:info,2014-08-19T15:46:25.248,ns_1@127.0.0.1:<0.293.0>:ns_log:crash_consumption_loop:64]Port server memcached on node 'babysitter_of_ns_1@127.0.0.1' exited with status 0. Restarting. Messages: EOL on stdin. Initiating shutdown
[user:info,2014-08-19T15:46:25.249,ns_1@127.0.0.1:<0.293.0>:ns_log:crash_consumption_loop:64]Port server moxi on node 'babysitter_of_ns_1@127.0.0.1' exited with status 0. Restarting. Messages: EOL on stdin. Exiting
[ns_server:debug,2014-08-19T15:46:25.249,ns_1@127.0.0.1:<0.435.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.433.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.249,ns_1@127.0.0.1:<0.2.0>:child_erlang:child_loop:108]Got EOL
[error_logger:info,2014-08-19T15:46:25.249,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.1886.0>},
{name,ns_ports_setup},
{mfa,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:25.250,ns_1@127.0.0.1:<0.2.0>:ns_bootstrap:stop:41]Initiated server shutdown
[error_logger:info,2014-08-19T15:46:25.250,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_msg:119]Initiated server shutdown
[ns_server:debug,2014-08-19T15:46:25.250,ns_1@127.0.0.1:<0.469.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.468.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.250,ns_1@127.0.0.1:<0.465.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.464.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.463,ns_1@127.0.0.1:<0.462.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_stats_event,<0.461.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.464,ns_1@127.0.0.1:<0.460.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_tick_event,<0.458.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.464,ns_1@127.0.0.1:<0.457.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.456.0>} exited with reason shutdown
[error_logger:error,2014-08-19T15:46:25.464,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_bucket_sup}
Context: shutdown_error
Reason: normal
Offender: [{pid,<0.457.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:25.464,ns_1@127.0.0.1:<0.1887.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.1886.0>} exited with reason killed
[ns_server:debug,2014-08-19T15:46:25.464,ns_1@127.0.0.1:<0.437.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.434.0>} exited with reason killed
[ns_server:debug,2014-08-19T15:46:25.465,ns_1@127.0.0.1:<0.365.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.364.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.465,ns_1@127.0.0.1:<0.361.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {master_activity_events,<0.360.0>} exited with reason killed
[ns_server:info,2014-08-19T15:46:25.465,ns_1@127.0.0.1:mb_master<0.339.0>:mb_master:terminate:299]Synchronously shutting down child mb_master_sup
[ns_server:debug,2014-08-19T15:46:25.465,ns_1@127.0.0.1:<0.340.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.339.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.465,ns_1@127.0.0.1:<0.333.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.332.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.465,ns_1@127.0.0.1:<0.329.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {buckets_events,<0.328.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.465,ns_1@127.0.0.1:<0.321.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.319.0>} exited with reason killed
[ns_server:debug,2014-08-19T15:46:25.465,ns_1@127.0.0.1:<0.312.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events_local,<0.311.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.466,ns_1@127.0.0.1:<0.318.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.317.0>} exited with reason killed
[ns_server:debug,2014-08-19T15:46:25.466,ns_1@127.0.0.1:<0.298.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.297.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.466,ns_1@127.0.0.1:<0.295.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.294.0>} exited with reason killed
[error_logger:error,2014-08-19T15:46:25.467,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: gen_event:init_it/6
pid: <0.320.0>
registered_name: bucket_info_cache_invalidations
exception exit: killed
in function gen_event:terminate_server/4
ancestors: [bucket_info_cache,ns_server_sup,ns_server_cluster_sup,
<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 233
stack_size: 24
reductions: 119
neighbours:
[error_logger:error,2014-08-19T15:46:25.568,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_server_cluster_sup}
Context: shutdown_error
Reason: killed
Offender: [{pid,<0.286.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:25.568,ns_1@127.0.0.1:<0.285.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.284.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.568,ns_1@127.0.0.1:<0.283.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.282.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T15:46:25.568,ns_1@127.0.0.1:ns_config<0.278.0>:ns_config:wait_saver:652]Done waiting for saver.
[error_logger:error,2014-08-19T15:46:25.570,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: couch_file:spawn_writer/2
pid: <0.225.0>
registered_name: []
exception exit: {noproc,
{gen_server,call,
[couch_file_write_guard,
{remove,<0.225.0>},
infinity]}}
in function gen_server:call/3
in call from couch_file:writer_loop/4
ancestors: [<0.222.0>,couch_server,couch_primary_services,
couch_server_sup,cb_couch_sup,ns_server_cluster_sup,
<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 233
stack_size: 24
reductions: 2139
neighbours:
[error_logger:error,2014-08-19T15:46:25.571,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: couch_file:spawn_writer/2
pid: <0.447.0>
registered_name: []
exception exit: {noproc,
{gen_server,call,
[couch_file_write_guard,
{remove,<0.447.0>},
infinity]}}
in function gen_server:call/3
in call from couch_file:writer_loop/4
ancestors: [<0.444.0>,couch_server,couch_primary_services,
couch_server_sup,cb_couch_sup,ns_server_cluster_sup,
<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 377
stack_size: 24
reductions: 783
neighbours:
[error_logger:error,2014-08-19T15:46:25.571,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_msg:119]** Generic server <0.226.0> terminating
** Last message in was {'EXIT',<0.211.0>,killed}
** When Server state == {db,<0.226.0>,<0.227.0>,nil,<<"1408448272284601">>,
<0.222.0>,<0.228.0>,
{db_header,11,1,
<<0,0,0,0,13,103,0,0,0,0,0,51,0,0,0,0,1,0,0,0,
0,0,0,0,0,0,13,69>>,
<<0,0,0,0,13,154,0,0,0,0,0,49,0,0,0,0,1>>,
nil,0,nil,nil},
1,
{btree,<0.222.0>,
{3431,
<<0,0,0,0,1,0,0,0,0,0,0,0,0,0,13,69>>,
51},
#Fun,
#Fun,
#Fun,
#Fun,1279,2558,
true},
{btree,<0.222.0>,
{3482,<<0,0,0,0,1>>,49},
#Fun,
#Fun,
#Fun,
#Fun,1279,2558,
true},
{btree,<0.222.0>,nil,identity,identity,
#Fun,nil,1279,2558,
true},
1,<<"_users">>,
"/opt/couchbase/var/lib/couchbase/data/_users.couch.1",
[],nil,
{user_ctx,null,[],undefined},
nil,
[before_header,after_header,on_file_open],
[create,
{user_ctx,
{user_ctx,null,[<<"_admin">>],undefined}},
sys_db]}
** Reason for termination ==
** killed
[error_logger:error,2014-08-19T15:46:25.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: couch_db:init/1
pid: <0.226.0>
registered_name: []
exception exit: killed
in function gen_server:terminate/6
ancestors: [couch_server,couch_primary_services,couch_server_sup,
cb_couch_sup,ns_server_cluster_sup,<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 1597
stack_size: 24
reductions: 285
neighbours:
[error_logger:error,2014-08-19T15:46:25.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_msg:119]** Generic server <0.448.0> terminating
** Last message in was {'EXIT',<0.211.0>,killed}
** When Server state == {db,<0.448.0>,<0.449.0>,nil,<<"1408448278509999">>,
<0.444.0>,<0.450.0>,
{db_header,11,0,nil,nil,nil,0,nil,nil},
0,
{btree,<0.444.0>,nil,
#Fun,
#Fun,
#Fun,
#Fun,1279,2558,
true},
{btree,<0.444.0>,nil,
#Fun,
#Fun,
#Fun,
#Fun,1279,2558,
true},
{btree,<0.444.0>,nil,identity,identity,
#Fun,nil,1279,2558,
true},
0,<<"_replicator">>,
"/opt/couchbase/var/lib/couchbase/data/_replicator.couch.1",
[],nil,
{user_ctx,null,[],undefined},
nil,
[before_header,after_header,on_file_open],
[create,sys_db,
{user_ctx,
{user_ctx,null,
[<<"_admin">>,<<"_replicator">>],
undefined}}]}
** Reason for termination ==
** killed
[error_logger:error,2014-08-19T15:46:25.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: couch_db:init/1
pid: <0.448.0>
registered_name: []
exception exit: killed
in function gen_server:terminate/6
ancestors: [couch_server,couch_primary_services,couch_server_sup,
cb_couch_sup,ns_server_cluster_sup,<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 24
reductions: 249
neighbours:
[error_logger:info,2014-08-19T15:46:25.578,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================INFO REPORT=========================
application: ns_server
exited: stopped
type: permanent
[ns_server:info,2014-08-19T15:46:30.362,nonode@nohost:<0.58.0>:ns_server:init_logging:248]Started & configured logging
[ns_server:info,2014-08-19T15:46:30.365,nonode@nohost:<0.58.0>:ns_server:log_pending:30]Static config terms:
[{error_logger_mf_dir,"/opt/couchbase/var/lib/couchbase/logs"},
{error_logger_mf_maxbytes,10485760},
{error_logger_mf_maxfiles,20},
{path_config_bindir,"/opt/couchbase/bin"},
{path_config_etcdir,"/opt/couchbase/etc/couchbase"},
{path_config_libdir,"/opt/couchbase/lib"},
{path_config_datadir,"/opt/couchbase/var/lib/couchbase"},
{path_config_tmpdir,"/opt/couchbase/var/lib/couchbase/tmp"},
{nodefile,"/opt/couchbase/var/lib/couchbase/couchbase-server.node"},
{loglevel_default,debug},
{loglevel_couchdb,info},
{loglevel_ns_server,debug},
{loglevel_error_logger,debug},
{loglevel_user,debug},
{loglevel_menelaus,debug},
{loglevel_ns_doctor,debug},
{loglevel_stats,debug},
{loglevel_rebalance,debug},
{loglevel_cluster,debug},
{loglevel_views,debug},
{loglevel_mapreduce_errors,debug},
{loglevel_xdcr,debug}]
[ns_server:info,2014-08-19T15:46:30.515,nonode@nohost:<0.58.0>:ns_server:start:58]Locked myself into memory successfully.
[error_logger:info,2014-08-19T15:46:30.551,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,crypto_sup}
started: [{pid,<0.166.0>},
{name,crypto_server},
{mfargs,{crypto_server,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.552,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: crypto
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.560,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: asn1
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.564,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: public_key
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.570,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.173.0>},
{name,ftp_sup},
{mfargs,{ftp_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.589,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,httpc_profile_sup}
started: [{pid,<0.176.0>},
{name,httpc_manager},
{mfargs,
{httpc_manager,start_link,
[default,only_session_cookies,inets]}},
{restart_type,permanent},
{shutdown,4000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.590,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,httpc_sup}
started: [{pid,<0.175.0>},
{name,httpc_profile_sup},
{mfargs,
{httpc_profile_sup,start_link,
[[{httpc,{default,only_session_cookies}}]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.592,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,httpc_sup}
started: [{pid,<0.177.0>},
{name,httpc_handler_sup},
{mfargs,{httpc_handler_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.592,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.174.0>},
{name,httpc_sup},
{mfargs,
{httpc_sup,start_link,
[[{httpc,{default,only_session_cookies}}]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.595,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.178.0>},
{name,httpd_sup},
{mfargs,{httpd_sup,start_link,[[]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.598,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inets_sup}
started: [{pid,<0.179.0>},
{name,tftp_sup},
{mfargs,{tftp_sup,start_link,[[]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.598,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: inets
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.598,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: oauth
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.608,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.185.0>},
{name,ssl_broker_sup},
{mfargs,{ssl_broker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.615,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.186.0>},
{name,ssl_manager},
{mfargs,{ssl_manager,start_link,[[]]}},
{restart_type,permanent},
{shutdown,4000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.617,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.187.0>},
{name,ssl_connection},
{mfargs,{ssl_connection_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,4000},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.617,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: ssl
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.770,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ssl_sup}
started: [{pid,<0.194.0>},
{name,ssl_server},
{mfargs,{ssl_server,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.770,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,lhttpc_sup}
started: [{pid,<0.192.0>},
{name,lhttpc_manager},
{mfargs,
{lhttpc_manager,start_link,
[[{name,lhttpc_manager}]]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.770,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: lhttpc
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.775,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: xmerl
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.790,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: compiler
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.796,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: syntax_tools
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.796,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: mochiweb
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.800,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_view_parser
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.804,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_set_view
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.808,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_index_merger
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.811,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: mapreduce
started_at: nonode@nohost
[error_logger:info,2014-08-19T15:46:30.848,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.203.0>},
{name,couch_config},
{mfargs,
{couch_server_sup,couch_config_start_link_wrapper,
[["/opt/couchbase/etc/couchdb/default.ini",
"/opt/couchbase/etc/couchdb/default.d/capi.ini",
"/opt/couchbase/etc/couchdb/default.d/geocouch.ini",
"/opt/couchbase/etc/couchdb/local.ini"],
<0.203.0>]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.871,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.206.0>},
{name,collation_driver},
{mfargs,{couch_drv,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.872,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.207.0>},
{name,couch_task_events},
{mfargs,
{gen_event,start_link,[{local,couch_task_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.873,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.208.0>},
{name,couch_task_status},
{mfargs,{couch_task_status,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.875,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.209.0>},
{name,couch_file_write_guard},
{mfargs,{couch_file_write_guard,sup_start_link,[]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.897,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.210.0>},
{name,couch_server},
{mfargs,{couch_server,sup_start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.897,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.223.0>},
{name,couch_db_update_event},
{mfargs,
{gen_event,start_link,[{local,couch_db_update}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.898,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.224.0>},
{name,couch_replication_event},
{mfargs,
{gen_event,start_link,[{local,couch_replication}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.898,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.225.0>},
{name,couch_replication_supervisor},
{mfargs,{couch_rep_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.900,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.226.0>},
{name,couch_log},
{mfargs,{couch_log,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.903,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.227.0>},
{name,couch_main_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_main_index_barrier,
"max_parallel_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.904,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.228.0>},
{name,couch_replica_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_replica_index_barrier,
"max_parallel_replica_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.904,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.229.0>},
{name,couch_spatial_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_spatial_index_barrier,
"max_parallel_spatial_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.904,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.205.0>},
{name,couch_primary_services},
{mfargs,{couch_primary_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.907,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.231.0>},
{name,couch_db_update_notifier_sup},
{mfargs,{couch_db_update_notifier_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.914,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.232.0>},
{name,auth_cache},
{mfargs,{couch_auth_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.923,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.235.0>},
{name,set_view_manager},
{mfargs,{couch_set_view,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.925,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.238.0>},
{name,spatial_manager},
{mfargs,{couch_spatial,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.925,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.240.0>},
{name,index_merger_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{connection_timeout,90000},
{pool_size,10000},
{name,couch_index_merger_connection_pool}]]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.928,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.241.0>},
{name,query_servers},
{mfargs,{couch_query_servers,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.930,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.243.0>},
{name,couch_set_view_ddoc_cache},
{mfargs,{couch_set_view_ddoc_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.934,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.245.0>},
{name,view_manager},
{mfargs,{couch_view,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.947,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.247.0>},
{name,httpd},
{mfargs,{couch_httpd,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.948,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.264.0>},
{name,uuids},
{mfargs,{couch_uuids,start,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:30.948,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.230.0>},
{name,couch_secondary_services},
{mfargs,{couch_secondary_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.949,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,cb_couch_sup}
started: [{pid,<0.204.0>},
{name,couch_app},
{mfargs,
{couch_app,start,
[fake,
["/opt/couchbase/etc/couchdb/default.ini",
"/opt/couchbase/etc/couchdb/local.ini"]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:30.949,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.161.0>},
{name,cb_couch_sup},
{mfargs,{cb_couch_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,supervisor}]
[ns_server:info,2014-08-19T15:46:30.956,nonode@nohost:ns_server_cluster_sup<0.160.0>:log_os_info:start_link:25]OS type: {unix,linux} Version: {2,6,32}
Runtime info: [{otp_release,"R14B04"},
{erl_version,"5.8.5"},
{erl_version_long,
"Erlang R14B04 (erts-5.8.5) [source] [64-bit] [smp:24:24] [rq:24] [async-threads:16] [kernel-poll:true]\n"},
{system_arch_raw,"x86_64-unknown-linux-gnu"},
{system_arch,"x86_64-unknown-linux-gnu"},
{localtime,{{2014,8,19},{15,46,30}}},
{memory,
[{total,560723616},
{processes,5526464},
{processes_used,5517808},
{system,555197152},
{atom,830761},
{atom_used,821985},
{binary,52944},
{code,7808288},
{ets,644944}]},
{loaded,
[ns_info,log_os_info,couch_config_writer,cb_init_loggers,
couch_uuids,mochiweb_acceptor,inet_tcp,gen_tcp,
mochiweb_socket,mochiweb_socket_server,mochilists,
mochiweb_http,eval_bits,couch_httpd,couch_view,
couch_set_view_ddoc_cache,couch_query_servers,
couch_spatial,mapreduce,couch_set_view,
couch_db_update_notifier,snappy,couch_compress,
couch_auth_cache,couch_db_update_notifier_sup,
couch_secondary_sup,queue,couch_index_barrier,
couch_event_sup,couch_log,couch_rep_sup,couch_btree,
couch_ref_counter,couch_db_updater,couch_db,httpd_util,
filelib,couch_file,couch_file_write_guard,
couch_task_status,erl_ddll,couch_drv,couch_primary_sup,
couch_server,string,re,file2,couch_util,couch_config,
couch_server_sup,ssl_server,crypto,ssl,lhttpc_manager,
lhttpc_sup,lhttpc,ssl_connection_sup,ssl_session_cache,
ssl_certificate_db,ssl_manager,ssl_broker_sup,ssl_sup,
ssl_app,tftp_sup,httpd_sup,httpc_handler_sup,httpc_cookie,
inets,httpc_manager,httpc,httpc_profile_sup,httpc_sup,
ftp_sup,inets_sup,inets_app,crypto_server,crypto_sup,
crypto_app,couch_app,cb_couch_sup,ns_server_cluster_sup,
mlockall,calendar,ale_default_formatter,otp_internal,misc,
'ale_logger-xdcr','ale_logger-mapreduce_errors',
'ale_logger-views','ale_logger-cluster',timer,
io_lib_fread,'ale_logger-rebalance','ale_logger-stats',
'ale_logger-ns_doctor','ale_logger-menelaus',
'ale_logger-user','ale_logger-ns_server',
'ale_logger-couchdb',ns_log_sink,disk_log_sup,
disk_log_server,disk_log_1,disk_log,ale_disk_sink,
ns_server,cpu_sup,memsup,disksup,os_mon,io,
release_handler,overload,alarm_handler,log_mf_h,sasl,
ale_error_logger_handler,'ale_logger-ale_logger',
'ale_logger-error_logger',beam_opcodes,beam_dict,beam_asm,
beam_validator,beam_flatten,beam_trim,beam_receive,
beam_bsm,beam_peep,beam_dead,beam_type,beam_bool,
beam_clean,beam_utils,beam_jump,beam_block,v3_codegen,
v3_life,v3_kernel,sys_core_dsetel,erl_bifs,sys_core_fold,
cerl_trees,sys_core_inline,core_lib,cerl,v3_core,erl_bits,
erl_expand_records,sys_pre_expand,sofs,erl_internal,sets,
ordsets,erl_lint,compile,dynamic_compile,ale_utils,
io_lib_pretty,io_lib_format,io_lib,ale_codegen,dict,ale,
ale_dynamic_sup,ale_sup,ale_app,ns_bootstrap,child_erlang,
file_io_server,orddict,erl_eval,file,c,kernel_config,
user_sup,supervisor_bridge,standard_error,unicode,binary,
ets,gb_sets,hipe_unified_loader,packages,code_server,code,
file_server,net_kernel,global_group,erl_distribution,
filename,inet_gethost_native,os,inet_parse,inet,inet_udp,
inet_config,inet_db,global,gb_trees,rpc,supervisor,kernel,
application_master,sys,application,gen_server,erl_parse,
proplists,erl_scan,lists,application_controller,proc_lib,
gen,gen_event,error_logger,heart,error_handler,erlang,
erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,init,
otp_ring0]},
{applications,
[{public_key,"Public key infrastructure","0.13"},
{asn1,"The Erlang ASN1 compiler version 1.6.18","1.6.18"},
{lhttpc,"Lightweight HTTP Client","1.3.0"},
{ale,"Another Logger for Erlang","8ca6d2a"},
{os_mon,"CPO CXC 138 46","2.2.7"},
{couch_set_view,"Set views","1.2.0a-a425d97-git"},
{compiler,"ERTS CXC 138 10","4.7.5"},
{inets,"INETS CXC 138 49","5.7.1"},
{couch,"Apache CouchDB","1.2.0a-a425d97-git"},
{mapreduce,"MapReduce using V8 JavaScript engine","1.0.0"},
{couch_index_merger,"Index merger","1.2.0a-a425d97-git"},
{kernel,"ERTS CXC 138 10","2.14.5"},
{crypto,"CRYPTO version 2","2.0.4"},
{ssl,"Erlang/OTP SSL application","4.1.6"},
{sasl,"SASL CXC 138 11","2.1.10"},
{couch_view_parser,"Couch view parser","1.0.0"},
{ns_server,"Couchbase server","2.5.1-1083-rel-enterprise"},
{mochiweb,"MochiMedia Web Server","2.4.2"},
{syntax_tools,"Syntax tools","1.6.7.1"},
{xmerl,"XML parser","1.2.10"},
{oauth,"Erlang OAuth implementation","7d85d3ef"},
{stdlib,"ERTS CXC 138 10","1.17.5"}]},
{pre_loaded,
[erlang,erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,
init,otp_ring0]},
{process_count,152},
{node,nonode@nohost},
{nodes,[]},
{registered,
[ssl_sup,couch_file_write_guard,global_group,
lhttpc_manager,tftp_sup,ale_sup,lhttpc_sup,httpc_sup,
disk_log_sup,ale_dynamic_sup,disk_log_server,
erl_prim_loader,httpc_profile_sup,os_mon_sup,
httpc_manager,code_server,ns_server_cluster_sup,
httpc_handler_sup,sasl_sup,'sink-ns_log',cpu_sup,
'sink-disk_stats',ftp_sup,couch_db_update_notifier_sup,
memsup,application_controller,'sink-disk_xdcr_errors',
disksup,ale,'sink-disk_xdcr',error_logger,
standard_error_sup,standard_error,'sink-disk_debug',
couch_log,'sink-disk_couchdb',
'sink-disk_mapreduce_errors',couch_auth_cache,
'sink-disk_views',inets_sup,couch_rep_sup,
'sink-disk_error',crypto_server,timer_server,crypto_sup,
couch_view,cb_couch_sup,ssl_connection_sup,
release_handler,couch_server_sup,couch_secondary_services,
ssl_manager,couch_primary_services,overload,
couch_db_update,couch_spatial_index_barrier,
couch_replica_index_barrier,couch_query_servers,
alarm_handler,httpd_sup,couch_set_view,
couch_set_view_ddoc_cache,kernel_safe_sup,couch_config,
couch_main_index_barrier,rex,inet_db,couch_task_status,
couch_replication,couch_index_merger_connection_pool,
'sink-disk_default',kernel_sup,global_name_server,
couch_spatial,ssl_broker_sup,couch_task_events,
couch_server,couch_httpd,file_server_2,init,sasl_safe_sup,
ssl_server,couch_drv,couch_uuids]},
{cookie,nocookie},
{wordsize,8},
{wall_clock,1}]
[ns_server:info,2014-08-19T15:46:30.960,nonode@nohost:ns_server_cluster_sup<0.160.0>:log_os_info:start_link:27]Manifest:
["","",
" ",
" ",
" ",
" ",
" ",
" ",
" "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "," "]
[error_logger:info,2014-08-19T15:46:30.962,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.266.0>},
{name,timeout_diag_logger},
{mfargs,{timeout_diag_logger,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:30.964,nonode@nohost:dist_manager<0.267.0>:dist_manager:read_address_config_from_path:83]Reading ip config from "/opt/couchbase/var/lib/couchbase/ip_start"
[ns_server:info,2014-08-19T15:46:30.964,nonode@nohost:dist_manager<0.267.0>:dist_manager:read_address_config_from_path:83]Reading ip config from "/opt/couchbase/var/lib/couchbase/ip"
[ns_server:info,2014-08-19T15:46:30.964,nonode@nohost:dist_manager<0.267.0>:dist_manager:init:159]ip config not found. Looks like we're a brand new node
[error_logger:info,2014-08-19T15:46:30.964,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,inet_gethost_native_sup}
started: [{pid,<0.269.0>},{mfa,{inet_gethost_native,init,[[]]}}]
[error_logger:info,2014-08-19T15:46:30.964,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.268.0>},
{name,inet_gethost_native_sup},
{mfargs,{inet_gethost_native,start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:31.097,nonode@nohost:dist_manager<0.267.0>:dist_manager:bringup:230]Attempting to bring up net_kernel with name 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:46:31.100,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.271.0>},
{name,erl_epmd},
{mfargs,{erl_epmd,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.100,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.272.0>},
{name,auth},
{mfargs,{auth,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.101,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.273.0>},
{name,net_kernel},
{mfargs,
{net_kernel,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:31.101,ns_1@127.0.0.1:dist_manager<0.267.0>:dist_manager:save_node:143]saving node to "/opt/couchbase/var/lib/couchbase/couchbase-server.node"
[error_logger:info,2014-08-19T15:46:31.102,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_sup}
started: [{pid,<0.270.0>},
{name,net_sup_dynamic},
{mfargs,
{erl_distribution,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T15:46:31.132,ns_1@127.0.0.1:dist_manager<0.267.0>:dist_manager:bringup:238]Attempted to save node name to disk: ok
[error_logger:info,2014-08-19T15:46:31.133,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.267.0>},
{name,dist_manager},
{mfargs,{dist_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.134,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.276.0>},
{name,ns_cookie_manager},
{mfargs,{ns_cookie_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.136,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.277.0>},
{name,ns_cluster},
{mfargs,{ns_cluster,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:31.137,ns_1@127.0.0.1:ns_config_sup<0.278.0>:ns_config_sup:init:32]loading static ns_config from "/opt/couchbase/etc/couchbase/config"
[error_logger:info,2014-08-19T15:46:31.137,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.279.0>},
{name,ns_config_events},
{mfargs,
{gen_event,start_link,[{local,ns_config_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.137,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.280.0>},
{name,ns_config_events_local},
{mfargs,
{gen_event,start_link,
[{local,ns_config_events_local}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:31.150,ns_1@127.0.0.1:ns_config<0.281.0>:ns_config:load_config:795]Loading static config from "/opt/couchbase/etc/couchbase/config"
[ns_server:info,2014-08-19T15:46:31.150,ns_1@127.0.0.1:ns_config<0.281.0>:ns_config:load_config:809]Loading dynamic config from "/opt/couchbase/var/lib/couchbase/config/config.dat"
[ns_server:debug,2014-08-19T15:46:31.152,ns_1@127.0.0.1:ns_config<0.281.0>:ns_config:load_config:816]Here's the full dynamic config we loaded:
[[{cert_and_pkey,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667478}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3QsYDNvapLjSSawb2oEl8ssA4mmR\ne2P+F4r1j3FAsOsO0VOuKtmsul6utqBCmO34s0vYc6X58RbQVYx8iu5XiTFu5rTi\nFbuHeJ+rjVi4gMxuD4yVIkTJq4KED+p1SkD9H4YvUWy5O7XlmPsA30fmdMpKsZWi\n6QIDAQABowIwADALBgkqhkiG9w0BAQUDggEBADSaYJBLzwuTm8X5KVmfNhrblZTL\n3Lc/PewFJZvp3UuiF6xJQdQMO9mvLZ6MaY/Z4NL/sLionbrmQuGxxChpTwyLNL7a\n666VquUle7zrVYOJKlv/2hgFjk1rhfD0JpqwKFaRTYyMqBRG7hXkPlPZPFJVeAft\ntvYLLJc5Iou4tvQvw3lB6F3g2jpzW4UQMXKklf3c0pZqYKCNYvEt7elnIyS/Aata\nFViP8384q9BMsSeoyj/mDfV4czbAwYgZN5ZRylM+IElGWNZVBydbBQaGJgj3yJD3\n3+2X3gSf7HN33p4dPCEeNBKnL0vBdS3GPkDibxHzKv5J3euds09QGtsK4BQ=\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{server_groups,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},
[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{dynamic_config_version,
[{'_vclock',[{'ns_1@127.0.0.1',{5,63575667474}}]},2,5]},
{cluster_compat_version,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},2,5]},
{vbucket_map_history,[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]}]},
{otp,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667473}}]},
{cookie,alkbqedpsntmtnxa}]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{'ns_1@127.0.0.1',{7,63575667472}}]}|{2,3,0}]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{auto_failover_cfg,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{enabled,false},
{timeout,120},
{max_nodes,1},
{count,0}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{buckets,[{configs,[]}]},
{drop_request_memory_threshold_mib,undefined},
{email_alerts,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{memory_quota,58026},
{nodes_wanted,['ns_1@127.0.0.1']},
{remote_clusters,[]},
{replication,[{enabled,true}]},
{replication_topology,star},
{rest,[{port,8091}]},
{rest_creds,[{creds,[]}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{request_limit,capi},undefined},
{{request_limit,rest},undefined},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',membership},active},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',
[{'ns_1@127.0.0.1',{3,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{'ns_1@127.0.0.1',{3,63575667472}}]},
{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B","binary",
"-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{{node,'ns_1@127.0.0.1',ssl_rest_port},18091}]]
[ns_server:info,2014-08-19T15:46:31.154,ns_1@127.0.0.1:ns_config<0.281.0>:ns_config:load_config:827]Here's full dynamic config we loaded + static & default config:
[{{node,'ns_1@127.0.0.1',ssl_rest_port},18091},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{'ns_1@127.0.0.1',{3,63575667472}}]},
{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B","binary",
"-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,stream]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',
[{'ns_1@127.0.0.1',{3,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',membership},active},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{request_limit,rest},undefined},
{{request_limit,capi},undefined},
{{couchdb,max_parallel_replica_indexers},2},
{{couchdb,max_parallel_indexers},4},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{rest_creds,[{creds,[]}]},
{rest,[{port,8091}]},
{replication_topology,star},
{replication,[{enabled,true}]},
{remote_clusters,[]},
{nodes_wanted,['ns_1@127.0.0.1']},
{memory_quota,58026},
{max_bucket_count,10},
{index_aware_rebalance_disabled,false},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{email_alerts,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]},
{drop_request_memory_threshold_mib,undefined},
{buckets,[{configs,[]}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{auto_failover_cfg,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{enabled,false},
{timeout,120},
{max_nodes,1},
{count,0}]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{'ns_1@127.0.0.1',{7,63575667472}}]}|{2,3,0}]},
{otp,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667473}}]},
{cookie,alkbqedpsntmtnxa}]},
{vbucket_map_history,[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]}]},
{cluster_compat_version,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},2,5]},
{dynamic_config_version,
[{'_vclock',[{'ns_1@127.0.0.1',{5,63575667474}}]},2,5]},
{server_groups,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},
[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{cert_and_pkey,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667478}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3QsYDNvapLjSSawb2oEl8ssA4mmR\ne2P+F4r1j3FAsOsO0VOuKtmsul6utqBCmO34s0vYc6X58RbQVYx8iu5XiTFu5rTi\nFbuHeJ+rjVi4gMxuD4yVIkTJq4KED+p1SkD9H4YvUWy5O7XlmPsA30fmdMpKsZWi\n6QIDAQABowIwADALBgkqhkiG9w0BAQUDggEBADSaYJBLzwuTm8X5KVmfNhrblZTL\n3Lc/PewFJZvp3UuiF6xJQdQMO9mvLZ6MaY/Z4NL/sLionbrmQuGxxChpTwyLNL7a\n666VquUle7zrVYOJKlv/2hgFjk1rhfD0JpqwKFaRTYyMqBRG7hXkPlPZPFJVeAft\ntvYLLJc5Iou4tvQvw3lB6F3g2jpzW4UQMXKklf3c0pZqYKCNYvEt7elnIyS/Aata\nFViP8384q9BMsSeoyj/mDfV4czbAwYgZN5ZRylM+IElGWNZVBydbBQaGJgj3yJD3\n3+2X3gSf7HN33p4dPCEeNBKnL0vBdS3GPkDibxHzKv5J3euds09QGtsK4BQ=\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]}]
[error_logger:info,2014-08-19T15:46:31.156,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.281.0>},
{name,ns_config},
{mfargs,
{ns_config,start_link,
["/opt/couchbase/etc/couchbase/config",
ns_config_default]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.157,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.283.0>},
{name,ns_config_remote},
{mfargs,
{ns_config_replica,start_link,
[{local,ns_config_remote}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.158,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.284.0>},
{name,ns_config_log},
{mfargs,{ns_config_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.159,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.286.0>},
{name,cb_config_couch_sync},
{mfargs,{cb_config_couch_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.159,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.278.0>},
{name,ns_config_sup},
{mfargs,{ns_config_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:31.160,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.288.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.166,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.290.0>},
{name,diag_handler_worker},
{mfa,{work_queue,start_link,[diag_handler_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:31.166,ns_1@127.0.0.1:ns_server_sup<0.289.0>:dir_size:start_link:47]Starting quick version of dir_size with program name: i386-linux-godu
[error_logger:info,2014-08-19T15:46:31.167,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.291.0>},
{name,dir_size},
{mfa,{dir_size,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.168,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.292.0>},
{name,request_throttler},
{mfa,{request_throttler,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.171,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.294.0>},
{name,timer2_server},
{mfargs,{timer2,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.171,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.293.0>},
{name,ns_log},
{mfa,{ns_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.172,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.295.0>},
{name,ns_crash_log_consumer},
{mfa,{ns_log,start_link_crash_consumer,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:31.173,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.296.0>},
{name,ns_config_ets_dup},
{mfa,{ns_config_ets_dup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:31.173,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T15:46:31.173,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T15:46:31.173,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T15:46:31.174,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[]}]
[ns_server:debug,2014-08-19T15:46:31.174,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
cert_and_pkey ->
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3Qs"...>>,
<<"*****">>}
[ns_server:debug,2014-08-19T15:46:31.174,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
[2,5]
[ns_server:debug,2014-08-19T15:46:31.175,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[ns_server:debug,2014-08-19T15:46:31.175,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[ns_server:debug,2014-08-19T15:46:31.175,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
memory_quota ->
58026
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,alkbqedpsntmtnxa}]
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:debug,2014-08-19T15:46:31.176,ns_1@127.0.0.1:ns_config_isasl_sync<0.299.0>:ns_config_isasl_sync:init:63]isasl_sync init: ["/opt/couchbase/var/lib/couchbase/isasl.pw","_admin",
"f6126ae5fac44bf3d8316165791747f2"]
[ns_server:info,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_isasl_sync<0.299.0>:ns_config_isasl_sync:init:71]isasl_sync init buckets: []
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
vbucket_map_history ->
[]
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_isasl_sync<0.299.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/isasl.pw"
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2014-08-19T15:46:31.178,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
8092
[ns_server:debug,2014-08-19T15:46:31.179,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T15:46:31.179,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T15:46:31.179,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T15:46:31.179,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',membership} ->
active
[ns_server:debug,2014-08-19T15:46:31.179,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T15:46:31.179,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T15:46:31.179,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T15:46:31.183,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T15:46:31.183,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T15:46:31.183,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
18092
[ns_server:warn,2014-08-19T15:46:31.183,ns_1@127.0.0.1:ns_config_isasl_sync<0.299.0>:ns_memcached:connect:1161]Unable to connect: {error,{badmatch,{error,econnrefused}}}, retrying.
[ns_server:debug,2014-08-19T15:46:31.183,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T15:46:31.183,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T15:46:31.184,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
18091
[error_logger:info,2014-08-19T15:46:32.184,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.299.0>},
{name,ns_config_isasl_sync},
{mfa,{ns_config_isasl_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.184,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.303.0>},
{name,ns_log_events},
{mfa,{gen_event,start_link,[{local,ns_log_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.185,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.305.0>},
{name,ns_node_disco_events},
{mfargs,
{gen_event,start_link,
[{local,ns_node_disco_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.187,ns_1@127.0.0.1:ns_node_disco<0.306.0>:ns_node_disco:init:103]Initting ns_node_disco with []
[ns_server:debug,2014-08-19T15:46:32.187,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[user:info,2014-08-19T15:46:32.187,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_sync:130]Node 'ns_1@127.0.0.1' synchronized otp cookie alkbqedpsntmtnxa from cluster
[ns_server:debug,2014-08-19T15:46:32.187,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T15:46:32.216,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T15:46:32.216,ns_1@127.0.0.1:<0.307.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T15:46:32.217,ns_1@127.0.0.1:<0.307.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T15:46:32.217,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.306.0>},
{name,ns_node_disco},
{mfargs,{ns_node_disco,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.218,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.309.0>},
{name,ns_node_disco_log},
{mfargs,{ns_node_disco_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.219,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.310.0>},
{name,ns_node_disco_conf_events},
{mfargs,{ns_node_disco_conf_events,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.220,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.311.0>},
{name,ns_config_rep_merger},
{mfargs,{ns_config_rep,start_link_merger,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.221,ns_1@127.0.0.1:ns_config_rep<0.312.0>:ns_config_rep:init:66]init pulling
[ns_server:debug,2014-08-19T15:46:32.221,ns_1@127.0.0.1:ns_config_rep<0.312.0>:ns_config_rep:init:68]init pushing
[ns_server:debug,2014-08-19T15:46:32.221,ns_1@127.0.0.1:ns_config_rep<0.312.0>:ns_config_rep:init:72]init reannouncing
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_config_events<0.279.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_config_events<0.279.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T15:46:32.222,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[]}]
[error_logger:info,2014-08-19T15:46:32.223,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.312.0>},
{name,ns_config_rep},
{mfargs,{ns_config_rep,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.223,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
cert_and_pkey ->
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3Qs"...>>,
<<"*****">>}
[ns_server:debug,2014-08-19T15:46:32.223,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
[2,5]
[error_logger:info,2014-08-19T15:46:32.223,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.304.0>},
{name,ns_node_disco_sup},
{mfa,{ns_node_disco_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T15:46:32.223,ns_1@127.0.0.1:ns_config_rep<0.312.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([alert_limits,auto_failover_cfg,autocompaction,
buckets,cert_and_pkey,cluster_compat_version,
drop_request_memory_threshold_mib,
dynamic_config_version,email_alerts,
fast_warmup,index_aware_rebalance_disabled,
max_bucket_count,memory_quota,nodes_wanted,otp,
remote_clusters,replication,
replication_topology,rest,rest_creds,
server_groups,set_view_update_daemon,
vbucket_map_history,
{couchdb,max_parallel_indexers},
{couchdb,max_parallel_replica_indexers},
{request_limit,capi},
{request_limit,rest},
{node,'ns_1@127.0.0.1',capi_port},
{node,'ns_1@127.0.0.1',compaction_daemon},
{node,'ns_1@127.0.0.1',config_version},
{node,'ns_1@127.0.0.1',isasl},
{node,'ns_1@127.0.0.1',membership},
{node,'ns_1@127.0.0.1',memcached},
{node,'ns_1@127.0.0.1',moxi},
{node,'ns_1@127.0.0.1',ns_log},
{node,'ns_1@127.0.0.1',port_servers},
{node,'ns_1@127.0.0.1',rest},
{node,'ns_1@127.0.0.1',ssl_capi_port},
{node,'ns_1@127.0.0.1',
ssl_proxy_downstream_port},
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
{node,'ns_1@127.0.0.1',ssl_rest_port}]..)
[ns_server:debug,2014-08-19T15:46:32.223,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[ns_server:debug,2014-08-19T15:46:32.223,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[ns_server:debug,2014-08-19T15:46:32.224,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[error_logger:info,2014-08-19T15:46:32.224,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.318.0>},
{name,vbucket_map_mirror},
{mfa,{vbucket_map_mirror,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.224,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[ns_server:debug,2014-08-19T15:46:32.224,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T15:46:32.224,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[ns_server:debug,2014-08-19T15:46:32.224,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
memory_quota ->
58026
[ns_server:debug,2014-08-19T15:46:32.225,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2014-08-19T15:46:32.225,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,alkbqedpsntmtnxa}]
[error_logger:info,2014-08-19T15:46:32.225,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.320.0>},
{name,bucket_info_cache},
{mfa,{bucket_info_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.225,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[error_logger:info,2014-08-19T15:46:32.225,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.323.0>},
{name,ns_tick_event},
{mfa,{gen_event,start_link,[{local,ns_tick_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:info,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[error_logger:info,2014-08-19T15:46:32.226,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.324.0>},
{name,buckets_events},
{mfa,{gen_event,start_link,[{local,buckets_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
vbucket_map_history ->
[]
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T15:46:32.226,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
8092
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',membership} ->
active
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T15:46:32.227,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T15:46:32.228,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T15:46:32.228,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T15:46:32.228,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T15:46:32.228,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T15:46:32.228,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T15:46:32.228,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T15:46:32.258,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T15:46:32.258,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T15:46:32.258,ns_1@127.0.0.1:<0.315.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T15:46:32.259,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T15:46:32.259,ns_1@127.0.0.1:<0.315.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T15:46:32.259,ns_1@127.0.0.1:ns_log_events<0.303.0>:ns_mail_log:init:44]ns_mail_log started up
[error_logger:info,2014-08-19T15:46:32.259,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_mail_sup}
started: [{pid,<0.326.0>},
{name,ns_mail_log},
{mfargs,{ns_mail_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.260,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.325.0>},
{name,ns_mail_sup},
{mfa,{ns_mail_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.260,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.327.0>},
{name,ns_stats_event},
{mfa,{gen_event,start_link,[{local,ns_stats_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.261,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.328.0>},
{name,samples_loader_tasks},
{mfa,{samples_loader_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.285,ns_1@127.0.0.1:ns_cookie_manager<0.276.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[error_logger:info,2014-08-19T15:46:32.285,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.329.0>},
{name,ns_heart},
{mfa,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.285,ns_1@127.0.0.1:<0.316.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T15:46:32.285,ns_1@127.0.0.1:<0.316.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T15:46:32.287,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.333.0>},
{name,ns_doctor},
{mfa,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.288,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.331.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@127.0.0.1'},
{latest,"minute"}]}}}
[ns_server:info,2014-08-19T15:46:32.292,ns_1@127.0.0.1:remote_clusters_info<0.336.0>:remote_clusters_info:read_or_create_table:540]Reading remote_clusters_info content from /opt/couchbase/var/lib/couchbase/remote_clusters_cache_v3
[error_logger:info,2014-08-19T15:46:32.295,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.336.0>},
{name,remote_clusters_info},
{mfa,{remote_clusters_info,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.295,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.339.0>},
{name,master_activity_events},
{mfa,
{gen_event,start_link,
[{local,master_activity_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.296,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.331.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1},
{proc_lib,init_p_do_apply,3}]}
[ns_server:debug,2014-08-19T15:46:32.298,ns_1@127.0.0.1:ns_server_sup<0.289.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: []
[ns_server:debug,2014-08-19T15:46:32.298,ns_1@127.0.0.1:ns_server_sup<0.289.0>:mb_master:check_master_takeover_needed:143]Got replies: []
[ns_server:debug,2014-08-19T15:46:32.298,ns_1@127.0.0.1:ns_server_sup<0.289.0>:mb_master:check_master_takeover_needed:149]Was unable to discover master, not going to force mastership takeover
[user:info,2014-08-19T15:46:32.300,ns_1@127.0.0.1:mb_master<0.342.0>:mb_master:init:86]I'm the only node, so I'm the master.
[ns_server:debug,2014-08-19T15:46:32.304,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.331.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@127.0.0.1'},
{latest,"minute"}]}}}
[ns_server:debug,2014-08-19T15:46:32.305,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.331.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1}]}
[ns_server:debug,2014-08-19T15:46:32.311,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[ns_server:debug,2014-08-19T15:46:32.311,ns_1@127.0.0.1:ns_config_rep<0.312.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([dynamic_config_version]..)
[ns_server:debug,2014-08-19T15:46:32.311,ns_1@127.0.0.1:mb_master_sup<0.347.0>:misc:start_singleton:986]start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.349.0> on 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:46:32.311,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.349.0>},
{name,ns_orchestrator},
{mfargs,{ns_orchestrator,start_link,[]}},
{restart_type,permanent},
{shutdown,20},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.313,ns_1@127.0.0.1:mb_master_sup<0.347.0>:misc:start_singleton:986]start_singleton(gen_server, ns_tick, [], []): started as <0.352.0> on 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:46:32.313,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.352.0>},
{name,ns_tick},
{mfargs,{ns_tick,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.314,ns_1@127.0.0.1:<0.353.0>:auto_failover:init:134]init auto_failover.
[ns_server:debug,2014-08-19T15:46:32.315,ns_1@127.0.0.1:mb_master_sup<0.347.0>:misc:start_singleton:986]start_singleton(gen_server, auto_failover, [], []): started as <0.353.0> on 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T15:46:32.315,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.353.0>},
{name,auto_failover},
{mfargs,{auto_failover,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.315,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.342.0>},
{name,mb_master},
{mfa,{mb_master,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.315,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.354.0>},
{name,master_activity_events_ingress},
{mfa,
{gen_event,start_link,
[{local,master_activity_events_ingress}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.315,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.355.0>},
{name,master_activity_events_timestamper},
{mfa,
{master_activity_events,start_link_timestamper,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.343,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.356.0>},
{name,master_activity_events_pids_watcher},
{mfa,
{master_activity_events_pids_watcher,start_link,
[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.360,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.357.0>},
{name,master_activity_events_keeper},
{mfa,{master_activity_events_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.393,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.361.0>},
{name,ns_ssl_services_setup},
{mfargs,{ns_ssl_services_setup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.395,ns_1@127.0.0.1:ns_ssl_services_setup<0.361.0>:ns_ssl_services_setup:restart_xdcr_proxy:201]Xdcr proxy restart failed. But that's usually normal. {'EXIT',
    {{badmatch,
      {badrpc,
       {'EXIT',
        {{case_clause,false},
         [{ns_child_ports_sup,restart_port_by_name,1},
          {rpc,'-handle_call_call/6-fun-0-',5}]}}}},
     [{ns_ports_setup,restart_xdcr_proxy,0},
      {ns_ssl_services_setup,restart_xdcr_proxy,0},
      {ns_ssl_services_setup,init,1},
      {gen_server,init_it,6},
      {proc_lib,init_p_do_apply,3}]}}
[error_logger:info,2014-08-19T15:46:32.414,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.363.0>},
{name,ns_rest_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_rest_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.415,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.380.0>},
{name,ns_capi_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_capi_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.415,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.360.0>},
{name,ns_ssl_services_sup},
{mfargs,{ns_ssl_services_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.416,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.397.0>},
{name,menelaus_ui_auth},
{mfargs,{menelaus_ui_auth,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.417,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.398.0>},
{name,menelaus_web_cache},
{mfargs,{menelaus_web_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.418,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.399.0>},
{name,menelaus_stats_gatherer},
{mfargs,{menelaus_stats_gatherer,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.418,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.400.0>},
{name,menelaus_web},
{mfargs,{menelaus_web,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.419,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.417.0>},
{name,menelaus_event},
{mfargs,{menelaus_event,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.420,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.418.0>},
{name,hot_keys_keeper},
{mfargs,{hot_keys_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.424,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.419.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[user:info,2014-08-19T15:46:32.424,ns_1@127.0.0.1:ns_server_sup<0.289.0>:menelaus_sup:start_link:44]Couchbase Server has started on web port 8091 on node 'ns_1@127.0.0.1'.
[error_logger:info,2014-08-19T15:46:32.424,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.359.0>},
{name,menelaus},
{mfa,{menelaus_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.425,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.421.0>},
{name,mc_couch_events},
{mfargs,
{gen_event,start_link,[{local,mc_couch_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.426,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.422.0>},
{name,mc_conn_sup},
{mfargs,{mc_conn_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,supervisor}]
[ns_server:info,2014-08-19T15:46:32.426,ns_1@127.0.0.1:<0.423.0>:mc_tcp_listener:init:24]mccouch is listening on port 11213
[error_logger:info,2014-08-19T15:46:32.426,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.423.0>},
{name,mc_tcp_listener},
{mfargs,{mc_tcp_listener,start_link,[11213]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.427,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.420.0>},
{name,mc_sup},
{mfa,{mc_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.427,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.424.0>},
{name,ns_ports_setup},
{mfa,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.427,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.425.0>},
{name,ns_port_memcached_killer},
{mfa,{ns_ports_setup,start_memcached_force_killer,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:32.428,ns_1@127.0.0.1:<0.427.0>:ns_memcached_log_rotator:init:28]Starting log rotator on "/opt/couchbase/var/lib/couchbase/logs"/"memcached.log"* with an initial period of 39003ms
[error_logger:info,2014-08-19T15:46:32.428,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.427.0>},
{name,ns_memcached_log_rotator},
{mfa,{ns_memcached_log_rotator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.431,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.429.0>},
{name,memcached_clients_pool},
{mfa,{memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.434,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.430.0>},
{name,proxied_memcached_clients_pool},
{mfa,{proxied_memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.434,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.431.0>},
{name,xdc_lhttpc_pool},
{mfa,
{lhttpc_manager,start_link,
[[{name,xdc_lhttpc_pool},
{connection_timeout,120000},
{pool_size,200}]]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.434,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.432.0>},
{name,ns_null_connection_pool},
{mfa,
{ns_null_connection_pool,start_link,
[ns_null_connection_pool]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.435,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.433.0>},
{name,xdc_replication_sup},
{mfa,{xdc_replication_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.436,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.434.0>},
{name,xdc_rep_manager},
{mfa,{xdc_rep_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,30000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.437,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.436.0>},
{name,ns_memcached_sockets_pool},
{mfa,{ns_memcached_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.439,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.439.0>},
{name,ns_bucket_worker},
{mfargs,{work_queue,start_link,[ns_bucket_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.440,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.441.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.440,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.440.0>},
{name,ns_bucket_sup},
{mfargs,{ns_bucket_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.441,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.438.0>},
{name,ns_bucket_worker_sup},
{mfa,{ns_bucket_worker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.441,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.442.0>},
{name,system_stats_collector},
{mfa,{system_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.442,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.445.0>},
{name,{stats_archiver,"@system"}},
{mfa,{stats_archiver,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.442,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.447.0>},
{name,{stats_reader,"@system"}},
{mfa,{stats_reader,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.446,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.448.0>},
{name,compaction_daemon},
{mfa,{compaction_daemon,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,86400000},
{child_type,worker}]
[ns_server:debug,2014-08-19T15:46:32.447,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:46:32.447,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:46:32.454,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.450.0>:xdc_rdoc_replication_srv:init:76]Loaded the following docs:
[]
[ns_server:debug,2014-08-19T15:46:32.454,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.450.0>:xdc_rdoc_replication_srv:handle_info:154]doing replicate_newnodes_docs
[error_logger:info,2014-08-19T15:46:32.454,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.450.0>},
{name,xdc_rdoc_replication_srv},
{mfa,{xdc_rdoc_replication_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T15:46:32.455,ns_1@127.0.0.1:set_view_update_daemon<0.452.0>:set_view_update_daemon:init:50]Set view update daemon, starting with the following settings:
update interval: 5000ms
minimum number of changes: 5000
[ns_server:debug,2014-08-19T15:46:32.455,ns_1@127.0.0.1:<0.2.0>:child_erlang:child_loop:104]Entered child_loop
[error_logger:info,2014-08-19T15:46:32.455,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.452.0>},
{name,set_view_update_daemon},
{mfa,{set_view_update_daemon,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T15:46:32.455,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.289.0>},
{name,ns_server_sup},
{mfargs,{ns_server_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T15:46:32.455,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: ns_server
started_at: 'ns_1@127.0.0.1'
[ns_server:debug,2014-08-19T15:47:02.448,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:47:02.448,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:47:32.449,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:47:32.449,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:48:02.450,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:48:02.450,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:48:32.451,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:48:32.451,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:49:02.452,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:49:02.452,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:49:32.453,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:49:32.453,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:50:02.454,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:50:02.454,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:50:32.455,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:50:32.455,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:51:02.456,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:51:02.456,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:51:32.457,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:51:32.457,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:52:02.458,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:52:02.458,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:52:32.459,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:52:32.459,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:53:02.460,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:53:02.460,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:53:32.461,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:53:32.461,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:54:02.462,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:54:02.462,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:54:32.463,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:54:32.463,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:55:02.464,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:55:02.464,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:55:32.465,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:55:32.465,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:56:02.466,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:56:02.466,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:56:32.467,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:56:32.467,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:57:02.468,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:57:02.468,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:57:32.469,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:57:32.469,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:58:02.470,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:58:02.470,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:58:32.471,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:58:32.471,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:59:02.472,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:59:02.472,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T15:59:32.473,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T15:59:32.473,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:00:02.474,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:00:02.474,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:00:32.510,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:00:32.511,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:01:02.512,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:01:02.512,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:01:32.513,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:01:32.513,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:02:02.514,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:02:02.514,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:02:32.519,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:02:32.519,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:03:02.520,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:03:02.520,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:03:32.521,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:03:32.521,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:04:02.522,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:04:02.522,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:04:32.523,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:04:32.523,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:05:02.524,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:05:02.524,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:05:32.525,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:05:32.525,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:06:02.526,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:06:02.526,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:06:32.528,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:06:32.528,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:07:02.529,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:07:02.529,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:07:32.530,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:07:32.530,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:08:02.531,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:08:02.531,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:08:32.532,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:08:32.532,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:09:02.533,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:09:02.533,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:09:32.534,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:09:32.534,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:10:02.535,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:10:02.535,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:10:32.536,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:10:32.537,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:11:02.538,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:11:02.538,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:11:32.539,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:11:32.539,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:12:02.540,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:12:02.540,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:12:32.546,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:12:32.546,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:13:02.547,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:13:02.547,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:13:32.548,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:13:32.548,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:14:02.549,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:14:02.549,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:14:32.550,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:14:32.550,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:15:02.551,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:15:02.551,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:15:32.552,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:15:32.552,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:16:02.553,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:16:02.553,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:16:32.554,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:16:32.554,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:17:02.555,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:17:02.555,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:17:32.556,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:17:32.556,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:18:02.557,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:18:02.557,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:18:32.558,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:18:32.558,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:19:02.559,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:19:02.559,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:19:32.560,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:19:32.560,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:20:02.561,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:20:02.561,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:20:32.562,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:20:32.562,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:21:02.563,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:21:02.563,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:21:32.564,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:21:32.564,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:22:02.565,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:22:02.565,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:22:32.566,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:22:32.566,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:23:02.567,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:23:02.567,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:23:32.568,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:23:32.568,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:24:02.569,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:24:02.569,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:24:32.570,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:24:32.570,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:25:02.571,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:25:02.571,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:25:32.572,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:25:32.572,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:26:02.573,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:26:02.573,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:26:32.574,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:26:32.574,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:27:02.575,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:27:02.575,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:27:32.576,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:27:32.576,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:28:02.577,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:28:02.577,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:28:32.578,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:28:32.578,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:29:02.579,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:29:02.579,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:29:32.580,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:29:32.580,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:30:02.581,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:30:02.581,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:30:32.582,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:30:32.582,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:31:02.583,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:31:02.583,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:31:32.584,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:31:32.584,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:32:02.585,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:32:02.585,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:32:32.586,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:32:32.586,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:33:02.587,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:33:02.587,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:33:32.588,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:33:32.588,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:34:02.589,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:34:02.589,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:34:32.590,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:34:32.590,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:35:02.591,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:35:02.591,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:35:32.592,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:35:32.592,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:36:02.593,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:36:02.593,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:36:32.594,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:36:32.594,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:37:02.595,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:37:02.595,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:37:32.596,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:37:32.596,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:38:02.597,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:38:02.597,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:38:32.598,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:38:32.598,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:39:02.599,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:39:02.599,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:39:32.600,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:39:32.600,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:40:02.601,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:40:02.601,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:40:32.602,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:40:32.602,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:41:02.603,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:41:02.603,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:41:32.604,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:41:32.604,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:42:02.605,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:42:02.605,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:42:32.606,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:42:32.606,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:43:02.607,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:43:02.607,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:43:32.608,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:43:32.608,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:44:02.609,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:44:02.609,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:44:32.610,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:44:32.610,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:45:02.611,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:45:02.611,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:45:32.612,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:45:32.612,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:46:02.613,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:46:02.613,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:46:32.621,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:46:32.621,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:47:02.622,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:47:02.622,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:47:32.623,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:47:32.623,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:48:02.624,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:48:02.624,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:48:32.625,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:48:32.625,ns_1@127.0.0.1:compaction_daemon<0.448.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:debug,2014-08-19T16:48:43.418,ns_1@127.0.0.1:ns_config_log<0.284.0>:ns_config_log:log_common:138]config change:
uuid ->
<<"7470311bdaa2a4acd47d21222af5c9ae">>
[ns_server:debug,2014-08-19T16:48:43.418,ns_1@127.0.0.1:ns_config_rep<0.312.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([uuid]..)
[user:info,2014-08-19T16:48:58.464,ns_1@127.0.0.1:<0.10867.0>:ns_storage_conf:setup_disk_storage_conf:116]Setting database directory path to /var/lib/pgsql and index directory path to /var/lib/pgsql
[ns_server:info,2014-08-19T16:48:58.465,ns_1@127.0.0.1:<0.10867.0>:ns_storage_conf:setup_disk_storage_conf:124]Removing all the buckets because database path has changed (old database path /opt/couchbase/var/lib/couchbase/data)
[ns_server:info,2014-08-19T16:48:58.465,ns_1@127.0.0.1:<0.10867.0>:ns_storage_conf:setup_disk_storage_conf:130]Removing all unused database files
[ns_server:debug,2014-08-19T16:48:58.473,ns_1@127.0.0.1:<0.453.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.452.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.473,ns_1@127.0.0.1:<0.449.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.448.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.776,ns_1@127.0.0.1:<0.446.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_stats_event,<0.445.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.777,ns_1@127.0.0.1:<0.444.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_tick_event,<0.442.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.777,ns_1@127.0.0.1:<0.441.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.440.0>} exited with reason shutdown
[error_logger:error,2014-08-19T16:48:58.777,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_bucket_sup}
Context: shutdown_error
Reason: normal
Offender: [{pid,<0.441.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:48:58.777,ns_1@127.0.0.1:<0.426.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.424.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:48:58.777,ns_1@127.0.0.1:<0.428.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.425.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:48:58.778,ns_1@127.0.0.1:<0.362.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.361.0>} exited with reason shutdown
[ns_server:info,2014-08-19T16:48:58.778,ns_1@127.0.0.1:mb_master<0.342.0>:mb_master:terminate:299]Synchronously shutting down child mb_master_sup
[ns_server:debug,2014-08-19T16:48:58.778,ns_1@127.0.0.1:<0.358.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {master_activity_events,<0.357.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:48:58.778,ns_1@127.0.0.1:<0.343.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.342.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.779,ns_1@127.0.0.1:<0.334.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.333.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.779,ns_1@127.0.0.1:<0.330.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {buckets_events,<0.329.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.779,ns_1@127.0.0.1:<0.322.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.320.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:48:58.779,ns_1@127.0.0.1:<0.319.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.318.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:48:58.779,ns_1@127.0.0.1:<0.313.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events_local,<0.312.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.779,ns_1@127.0.0.1:<0.301.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.299.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.779,ns_1@127.0.0.1:<0.297.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.296.0>} exited with reason killed
[error_logger:error,2014-08-19T16:48:58.781,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: gen_event:init_it/6
pid: <0.321.0>
registered_name: bucket_info_cache_invalidations
exception exit: killed
in function gen_event:terminate_server/4
ancestors: [bucket_info_cache,ns_server_sup,ns_server_cluster_sup,
<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 233
stack_size: 24
reductions: 119
neighbours:
[ns_server:debug,2014-08-19T16:48:58.882,ns_1@127.0.0.1:ns_config<0.281.0>:ns_config:wait_saver:652]Done waiting for saver.
[ns_server:debug,2014-08-19T16:48:58.882,ns_1@127.0.0.1:<0.287.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.286.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:48:58.882,ns_1@127.0.0.1:<0.285.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.284.0>} exited with reason shutdown
[error_logger:error,2014-08-19T16:48:58.882,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_server_cluster_sup}
Context: shutdown_error
Reason: killed
Offender: [{pid,<0.288.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[error_logger:error,2014-08-19T16:48:58.883,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_msg:119]** Generic server <0.220.0> terminating
** Last message in was {'EXIT',<0.210.0>,killed}
** When Server state == {db,<0.220.0>,<0.221.0>,nil,<<"1408448790897241">>,
<0.217.0>,<0.222.0>,
{db_header,11,1,
<<0,0,0,0,13,103,0,0,0,0,0,51,0,0,0,0,1,0,0,0,
0,0,0,0,0,0,13,69>>,
<<0,0,0,0,13,154,0,0,0,0,0,49,0,0,0,0,1>>,
nil,0,nil,nil},
1,
{btree,<0.217.0>,
{3431,
<<0,0,0,0,1,0,0,0,0,0,0,0,0,0,13,69>>,
51},
#Fun,
#Fun,
#Fun,
#Fun,1279,2558,
true},
{btree,<0.217.0>,
{3482,<<0,0,0,0,1>>,49},
#Fun,
#Fun,
#Fun,
#Fun,1279,2558,
true},
{btree,<0.217.0>,nil,identity,identity,
#Fun,nil,1279,2558,
true},
1,<<"_users">>,
"/opt/couchbase/var/lib/couchbase/data/_users.couch.1",
[],nil,
{user_ctx,null,[],undefined},
nil,
[before_header,after_header,on_file_open],
[]}
** Reason for termination ==
** killed
[error_logger:error,2014-08-19T16:48:58.884,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_msg:119]** Generic server <0.214.0> terminating
** Last message in was {'EXIT',<0.210.0>,killed}
** When Server state == {db,<0.214.0>,<0.215.0>,nil,<<"1408448790891054">>,
<0.211.0>,<0.216.0>,
{db_header,11,0,nil,nil,nil,0,nil,nil},
0,
{btree,<0.211.0>,nil,
#Fun,
#Fun,
#Fun,
#Fun,1279,
2558,true},
{btree,<0.211.0>,nil,
#Fun,
#Fun,
#Fun,
#Fun,1279,
2558,true},
{btree,<0.211.0>,nil,identity,identity,
#Fun,nil,1279,2558,
true},
0,<<"_replicator">>,
"/opt/couchbase/var/lib/couchbase/data/_replicator.couch.1",
[],nil,
{user_ctx,null,[],undefined},
nil,
[before_header,after_header,on_file_open],
[]}
** Reason for termination ==
** killed
[error_logger:error,2014-08-19T16:48:58.884,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: couch_db:init/1
pid: <0.214.0>
registered_name: []
exception exit: killed
in function gen_server:terminate/6
ancestors: [couch_server,couch_primary_services,couch_server_sup,
cb_couch_sup,ns_server_cluster_sup,<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 24
reductions: 249
neighbours:
[error_logger:error,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: couch_db:init/1
pid: <0.220.0>
registered_name: []
exception exit: killed
in function gen_server:terminate/6
ancestors: [couch_server,couch_primary_services,couch_server_sup,
cb_couch_sup,ns_server_cluster_sup,<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 24
reductions: 210
neighbours:
[error_logger:info,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================INFO REPORT=========================
application: mapreduce
exited: stopped
type: temporary
[error_logger:info,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================INFO REPORT=========================
application: couch_view_parser
exited: stopped
type: temporary
[error_logger:info,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================INFO REPORT=========================
application: couch_index_merger
exited: stopped
type: temporary
[error_logger:info,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================INFO REPORT=========================
application: couch_set_view
exited: stopped
type: temporary
[error_logger:info,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_view_parser
started_at: 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_set_view
started_at: 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T16:48:58.885,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: couch_index_merger
started_at: 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T16:48:58.886,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
application: mapreduce
started_at: 'ns_1@127.0.0.1'
[error_logger:info,2014-08-19T16:48:58.911,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.10932.0>},
{name,couch_config},
{mfargs,
{couch_server_sup,couch_config_start_link_wrapper,
[["/opt/couchbase/etc/couchdb/default.ini",
"/opt/couchbase/etc/couchdb/default.d/capi.ini",
"/opt/couchbase/etc/couchdb/default.d/geocouch.ini",
"/opt/couchbase/etc/couchdb/local.ini"],
<0.10932.0>]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:48:58.917,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.10935.0>},
{name,collation_driver},
{mfargs,{couch_drv,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:48:58.918,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.10936.0>},
{name,couch_task_events},
{mfargs,
{gen_event,start_link,[{local,couch_task_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:48:58.918,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.10937.0>},
{name,couch_task_status},
{mfargs,{couch_task_status,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:48:58.918,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.10938.0>},
{name,couch_file_write_guard},
{mfargs,{couch_file_write_guard,sup_start_link,[]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.551,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.10939.0>},
{name,couch_server},
{mfargs,{couch_server,sup_start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.552,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.17108.0>},
{name,couch_db_update_event},
{mfargs,
{gen_event,start_link,[{local,couch_db_update}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.552,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.17109.0>},
{name,couch_replication_event},
{mfargs,
{gen_event,start_link,[{local,couch_replication}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.552,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.17110.0>},
{name,couch_replication_supervisor},
{mfargs,{couch_rep_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.552,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.17111.0>},
{name,couch_log},
{mfargs,{couch_log,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.552,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.17112.0>},
{name,couch_main_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_main_index_barrier,
"max_parallel_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.553,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.17113.0>},
{name,couch_replica_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_replica_index_barrier,
"max_parallel_replica_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.553,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_primary_services}
started: [{pid,<0.17114.0>},
{name,couch_spatial_index_barrier},
{mfargs,
{couch_index_barrier,start_link,
[couch_spatial_index_barrier,
"max_parallel_spatial_indexers"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.553,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.10934.0>},
{name,couch_primary_services},
{mfargs,{couch_primary_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.553,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17116.0>},
{name,couch_db_update_notifier_sup},
{mfargs,{couch_db_update_notifier_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.554,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17117.0>},
{name,auth_cache},
{mfargs,{couch_auth_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.554,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17119.0>},
{name,set_view_manager},
{mfargs,{couch_set_view,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.555,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17121.0>},
{name,spatial_manager},
{mfargs,{couch_spatial,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.555,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17123.0>},
{name,index_merger_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{connection_timeout,90000},
{pool_size,10000},
{name,couch_index_merger_connection_pool}]]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.555,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17124.0>},
{name,query_servers},
{mfargs,{couch_query_servers,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.555,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17126.0>},
{name,couch_set_view_ddoc_cache},
{mfargs,{couch_set_view_ddoc_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.555,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17128.0>},
{name,view_manager},
{mfargs,{couch_view,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.556,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17130.0>},
{name,httpd},
{mfargs,{couch_httpd,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.557,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_secondary_services}
started: [{pid,<0.17147.0>},
{name,uuids},
{mfargs,{couch_uuids,start,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.557,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,couch_server_sup}
started: [{pid,<0.17115.0>},
{name,couch_secondary_services},
{mfargs,{couch_secondary_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.557,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,cb_couch_sup}
started: [{pid,<0.10933.0>},
{name,couch_app},
{mfargs,
{couch_app,start,
[fake,
["/opt/couchbase/etc/couchdb/default.ini",
"/opt/couchbase/etc/couchdb/local.ini"]]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2014-08-19T16:49:02.560,ns_1@127.0.0.1:ns_server_cluster_sup<0.160.0>:log_os_info:start_link:25]OS type: {unix,linux} Version: {2,6,32}
Runtime info: [{otp_release,"R14B04"},
{erl_version,"5.8.5"},
{erl_version_long,
"Erlang R14B04 (erts-5.8.5) [source] [64-bit] [smp:24:24] [rq:24] [async-threads:16] [kernel-poll:true]\n"},
{system_arch_raw,"x86_64-unknown-linux-gnu"},
{system_arch,"x86_64-unknown-linux-gnu"},
{localtime,{{2014,8,19},{16,49,2}}},
{memory,
[{total,601493728},
{processes,39621448},
{processes_used,39614480},
{system,561872280},
{atom,1158025},
{atom_used,1136631},
{binary,434880},
{code,10957303},
{ets,2290232}]},
{loaded,
[lib,capi_utils,mochiweb_mime,mochiweb_io,stats_collector,
menelaus_web_remote_clusters,mb_grid,ejson,
mochiweb_response,menelaus_web_buckets,menelaus_auth,
mochiweb_util,mochiweb_request,mochiweb_headers,
set_view_update_daemon,xdc_rdoc_replication_srv,
compaction_daemon,stats_archiver,ns_bucket_sup,
ns_bucket_worker_sup,couch_changes,
ns_memcached_sockets_pool,xdc_rep_manager,
ns_null_connection_pool,proxied_memcached_clients_pool,
ns_moxi_sup,ns_connection_pool,memcached_clients_pool,
ns_cluster_membership,ns_memcached_log_rotator,
mc_tcp_listener,mc_conn_sup,mc_sup,
menelaus_web_alerts_srv,hot_keys_keeper,menelaus_event,
menelaus_stats_gatherer,menelaus_web_cache,
menelaus_ui_auth,ssl_tls1,ssl_cipher,ssl_record,mochiweb,
menelaus_util,menelaus_web,ns_ports_setup,ns_server_cert,
ns_ssl_services_setup,ns_ssl_services_sup,menelaus_sup,
ringbuffer,master_activity_events_keeper,
master_activity_events_pids_watcher,auto_failover,ns_tick,
ns_online_config_upgrader,ns_orchestrator,
master_activity_events,system_stats_collector,
mb_master_sup,failover_safeness_level,gen_fsm,mb_master,
xdc_replication_sup,ns_bucket,remote_clusters_info,
stats_reader,ns_doctor,ns_heart,samples_loader_tasks,
ns_mail_log,ns_mail_sup,bucket_info_cache,
vbucket_map_mirror,ns_node_disco_rep_events,ns_config_rep,
ns_node_disco_conf_events,ns_node_disco_log,net_adm,
cluster_compat_mode,ns_node_disco,ns_node_disco_sup,
ns_memcached,dist_util,ns_config_isasl_sync,ns_crash_log,
ns_config_ets_dup,random,timer2,ns_log,request_throttler,
menelaus_deps,dir_size,work_queue,supervisor2,
ns_server_sup,ns_process_registry,cb_config_couch_sync,
ns_pubsub,ns_config_replica,ns_config_log,vclock,
ns_storage_conf,ns_config_default,ns_config,ns_config_sup,
ns_cluster,ns_cookie_manager,erl_epmd,inet_tcp_dist,
gen_udp,dist_manager,timeout_diag_logger,path_config,
diag_handler,auth,ns_info,log_os_info,couch_config_writer,
cb_init_loggers,couch_uuids,mochiweb_acceptor,inet_tcp,
gen_tcp,mochiweb_socket,mochiweb_socket_server,mochilists,
mochiweb_http,eval_bits,couch_httpd,couch_view,
couch_set_view_ddoc_cache,couch_query_servers,
couch_spatial,mapreduce,couch_set_view,
couch_db_update_notifier,snappy,couch_compress,
couch_auth_cache,couch_db_update_notifier_sup,
couch_secondary_sup,queue,couch_index_barrier,
couch_event_sup,couch_log,couch_rep_sup,couch_btree,
couch_ref_counter,couch_db_updater,couch_db,httpd_util,
filelib,couch_file,couch_file_write_guard,
couch_task_status,erl_ddll,couch_drv,couch_primary_sup,
couch_server,string,re,file2,couch_util,couch_config,
couch_server_sup,ssl_server,crypto,ssl,lhttpc_manager,
lhttpc_sup,lhttpc,ssl_connection_sup,ssl_session_cache,
ssl_certificate_db,ssl_manager,ssl_broker_sup,ssl_sup,
ssl_app,tftp_sup,httpd_sup,httpc_handler_sup,httpc_cookie,
inets,httpc_manager,httpc,httpc_profile_sup,httpc_sup,
ftp_sup,inets_sup,inets_app,crypto_server,crypto_sup,
crypto_app,couch_app,cb_couch_sup,ns_server_cluster_sup,
mlockall,calendar,ale_default_formatter,otp_internal,misc,
'ale_logger-xdcr','ale_logger-mapreduce_errors',
'ale_logger-views','ale_logger-cluster',timer,
io_lib_fread,'ale_logger-rebalance','ale_logger-stats',
'ale_logger-ns_doctor','ale_logger-menelaus',
'ale_logger-user','ale_logger-ns_server',
'ale_logger-couchdb',ns_log_sink,disk_log_sup,
disk_log_server,disk_log_1,disk_log,ale_disk_sink,
ns_server,cpu_sup,memsup,disksup,os_mon,io,
release_handler,overload,alarm_handler,log_mf_h,sasl,
ale_error_logger_handler,'ale_logger-ale_logger',
'ale_logger-error_logger',beam_opcodes,beam_dict,beam_asm,
beam_validator,beam_flatten,beam_trim,beam_receive,
beam_bsm,beam_peep,beam_dead,beam_type,beam_bool,
beam_clean,beam_utils,beam_jump,beam_block,v3_codegen,
v3_life,v3_kernel,sys_core_dsetel,erl_bifs,sys_core_fold,
cerl_trees,sys_core_inline,core_lib,cerl,v3_core,erl_bits,
erl_expand_records,sys_pre_expand,sofs,erl_internal,sets,
ordsets,erl_lint,compile,dynamic_compile,ale_utils,
io_lib_pretty,io_lib_format,io_lib,ale_codegen,dict,ale,
ale_dynamic_sup,ale_sup,ale_app,ns_bootstrap,child_erlang,
file_io_server,orddict,erl_eval,file,c,kernel_config,
user_sup,supervisor_bridge,standard_error,unicode,binary,
ets,gb_sets,hipe_unified_loader,packages,code_server,code,
file_server,net_kernel,global_group,erl_distribution,
filename,inet_gethost_native,os,inet_parse,inet,inet_udp,
inet_config,inet_db,global,gb_trees,rpc,supervisor,kernel,
application_master,sys,application,gen_server,erl_parse,
proplists,erl_scan,lists,application_controller,proc_lib,
gen,gen_event,error_logger,heart,error_handler,erlang,
erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,init,
otp_ring0]},
{applications,
[{public_key,"Public key infrastructure","0.13"},
{asn1,"The Erlang ASN1 compiler version 1.6.18","1.6.18"},
{lhttpc,"Lightweight HTTP Client","1.3.0"},
{ale,"Another Logger for Erlang","8ca6d2a"},
{os_mon,"CPO CXC 138 46","2.2.7"},
{couch_set_view,"Set views","1.2.0a-a425d97-git"},
{compiler,"ERTS CXC 138 10","4.7.5"},
{inets,"INETS CXC 138 49","5.7.1"},
{couch,"Apache CouchDB","1.2.0a-a425d97-git"},
{mapreduce,"MapReduce using V8 JavaScript engine","1.0.0"},
{couch_index_merger,"Index merger","1.2.0a-a425d97-git"},
{kernel,"ERTS CXC 138 10","2.14.5"},
{crypto,"CRYPTO version 2","2.0.4"},
{ssl,"Erlang/OTP SSL application","4.1.6"},
{sasl,"SASL CXC 138 11","2.1.10"},
{couch_view_parser,"Couch view parser","1.0.0"},
{ns_server,"Couchbase server","2.5.1-1083-rel-enterprise"},
{mochiweb,"MochiMedia Web Server","2.4.2"},
{syntax_tools,"Syntax tools","1.6.7.1"},
{xmerl,"XML parser","1.2.10"},
{oauth,"Erlang OAuth implementation","7d85d3ef"},
{stdlib,"ERTS CXC 138 10","1.17.5"}]},
{pre_loaded,
[erlang,erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,
init,otp_ring0]},
{process_count,6319},
{node,'ns_1@127.0.0.1'},
{nodes,[]},
{registered,
[disk_log_sup,disk_log_server,httpc_sup,ssl_broker_sup,
code_server,httpc_profile_sup,couch_set_view_ddoc_cache,
httpc_manager,ssl_server,inet_gethost_native_sup,
httpc_handler_sup,ssl_sup,application_controller,
couch_index_merger_connection_pool,ftp_sup,couch_spatial,
standard_error_sup,inets_sup,crypto_server,crypto_sup,
couch_secondary_services,couch_primary_services,
couch_db_update,couch_config,error_logger,couch_server,
couch_uuids,'sink-disk_default',os_mon_sup,cpu_sup,memsup,
disksup,kernel_safe_sup,auth,couch_db_update_notifier_sup,
dist_manager,couch_log,couch_auth_cache,couch_rep_sup,
sasl_safe_sup,couch_view,couch_server_sup,cb_couch_sup,
timer_server,couch_query_servers,couch_task_status,
couch_httpd,couch_drv,rex,couch_file_write_guard,net_sup,
kernel_sup,global_name_server,sasl_sup,net_kernel,
file_server_2,release_handler,overload,alarm_handler,
ale_sup,ale_dynamic_sup,lhttpc_sup,ale,
couch_spatial_index_barrier,couch_replica_index_barrier,
couch_main_index_barrier,couch_set_view,couch_replication,
couch_task_events,lhttpc_manager,timer2_server,tftp_sup,
ns_server_cluster_sup,standard_error,erl_prim_loader,
inet_gethost_native,init,inet_db,httpd_sup,'sink-ns_log',
'sink-disk_stats','sink-disk_xdcr_errors',
'sink-disk_xdcr','sink-disk_debug','sink-disk_couchdb',
'sink-disk_mapreduce_errors','sink-disk_views',
global_group,'sink-disk_error',ssl_connection_sup,
ssl_manager,erl_epmd]},
{cookie,alkbqedpsntmtnxa},
{wordsize,8},
{wall_clock,3753}]
[ns_server:info,2014-08-19T16:49:02.562,ns_1@127.0.0.1:ns_server_cluster_sup<0.160.0>:log_os_info:start_link:27]Manifest:
["","",
" ",
" ",
" ",
" ",
" ",
" ",
" "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "," "]
[error_logger:info,2014-08-19T16:49:02.563,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.17149.0>},
{name,timeout_diag_logger},
{mfargs,{timeout_diag_logger,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:02.564,ns_1@127.0.0.1:ns_config_sup<0.17152.0>:ns_config_sup:init:32]loading static ns_config from "/opt/couchbase/etc/couchbase/config"
[error_logger:info,2014-08-19T16:49:02.564,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.17150.0>},
{name,ns_cookie_manager},
{mfargs,{ns_cookie_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.564,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.17151.0>},
{name,ns_cluster},
{mfargs,{ns_cluster,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.564,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.17153.0>},
{name,ns_config_events},
{mfargs,
{gen_event,start_link,[{local,ns_config_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.564,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.17154.0>},
{name,ns_config_events_local},
{mfargs,
{gen_event,start_link,
[{local,ns_config_events_local}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:02.565,ns_1@127.0.0.1:ns_config<0.17155.0>:ns_config:load_config:795]Loading static config from "/opt/couchbase/etc/couchbase/config"
[ns_server:info,2014-08-19T16:49:02.566,ns_1@127.0.0.1:ns_config<0.17155.0>:ns_config:load_config:809]Loading dynamic config from "/opt/couchbase/var/lib/couchbase/config/config.dat"
[ns_server:debug,2014-08-19T16:49:02.566,ns_1@127.0.0.1:ns_config<0.17155.0>:ns_config:load_config:816]Here's full dynamic config we loaded:
[[{uuid,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575671723}}]}|
<<"7470311bdaa2a4acd47d21222af5c9ae">>]},
{dynamic_config_version,
[{'_vclock',[{'ns_1@127.0.0.1',{5,63575667474}}]},2,5]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{auto_failover_cfg,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{enabled,false},
{timeout,120},
{max_nodes,1},
{count,0}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{buckets,[{configs,[]}]},
{cert_and_pkey,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667478}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3QsYDNvapLjSSawb2oEl8ssA4mmR\ne2P+F4r1j3FAsOsO0VOuKtmsul6utqBCmO34s0vYc6X58RbQVYx8iu5XiTFu5rTi\nFbuHeJ+rjVi4gMxuD4yVIkTJq4KED+p1SkD9H4YvUWy5O7XlmPsA30fmdMpKsZWi\n6QIDAQABowIwADALBgkqhkiG9w0BAQUDggEBADSaYJBLzwuTm8X5KVmfNhrblZTL\n3Lc/PewFJZvp3UuiF6xJQdQMO9mvLZ6MaY/Z4NL/sLionbrmQuGxxChpTwyLNL7a\n666VquUle7zrVYOJKlv/2hgFjk1rhfD0JpqwKFaRTYyMqBRG7hXkPlPZPFJVeAft\ntvYLLJc5Iou4tvQvw3lB6F3g2jpzW4UQMXKklf3c0pZqYKCNYvEt7elnIyS/Aata\nFViP8384q9BMsSeoyj/mDfV4czbAwYgZN5ZRylM+IElGWNZVBydbBQaGJgj3yJD3\n3+2X3gSf7HN33p4dPCEeNBKnL0vBdS3GPkDibxHzKv5J3euds09QGtsK4BQ=\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{cluster_compat_version,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},2,5]},
{drop_request_memory_threshold_mib,undefined},
{email_alerts,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{memory_quota,58026},
{nodes_wanted,['ns_1@127.0.0.1']},
{otp,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667473}}]},
{cookie,alkbqedpsntmtnxa}]},
{remote_clusters,[]},
{replication,[{enabled,true}]},
{replication_topology,star},
{rest,[{port,8091}]},
{rest_creds,[{creds,[]}]},
{server_groups,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},
[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{vbucket_map_history,[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]}]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{request_limit,capi},undefined},
{{request_limit,rest},undefined},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{'ns_1@127.0.0.1',{7,63575667472}}]}|{2,3,0}]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',membership},active},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',
[{'ns_1@127.0.0.1',{3,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{'ns_1@127.0.0.1',{3,63575667472}}]},
{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B","binary",
"-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{{node,'ns_1@127.0.0.1',ssl_rest_port},18091}]]
[ns_server:info,2014-08-19T16:49:02.568,ns_1@127.0.0.1:ns_config<0.17155.0>:ns_config:load_config:827]Here's full dynamic config we loaded + static & default config:
[{{node,'ns_1@127.0.0.1',ssl_rest_port},18091},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{'ns_1@127.0.0.1',{3,63575667472}}]},
{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B","binary",
"-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,stream]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',
[{'ns_1@127.0.0.1',{3,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',membership},active},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',
[{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{'ns_1@127.0.0.1',{7,63575667472}}]}|{2,3,0}]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{request_limit,rest},undefined},
{{request_limit,capi},undefined},
{{couchdb,max_parallel_replica_indexers},2},
{{couchdb,max_parallel_indexers},4},
{vbucket_map_history,[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{server_groups,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},
[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{rest_creds,[{creds,[]}]},
{rest,[{port,8091}]},
{replication_topology,star},
{replication,[{enabled,true}]},
{remote_clusters,[]},
{otp,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667473}}]},
{cookie,alkbqedpsntmtnxa}]},
{nodes_wanted,['ns_1@127.0.0.1']},
{memory_quota,58026},
{max_bucket_count,10},
{index_aware_rebalance_disabled,false},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{email_alerts,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]},
{drop_request_memory_threshold_mib,undefined},
{cluster_compat_version,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667474}}]},2,5]},
{cert_and_pkey,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667478}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3QsYDNvapLjSSawb2oEl8ssA4mmR\ne2P+F4r1j3FAsOsO0VOuKtmsul6utqBCmO34s0vYc6X58RbQVYx8iu5XiTFu5rTi\nFbuHeJ+rjVi4gMxuD4yVIkTJq4KED+p1SkD9H4YvUWy5O7XlmPsA30fmdMpKsZWi\n6QIDAQABowIwADALBgkqhkiG9w0BAQUDggEBADSaYJBLzwuTm8X5KVmfNhrblZTL\n3Lc/PewFJZvp3UuiF6xJQdQMO9mvLZ6MaY/Z4NL/sLionbrmQuGxxChpTwyLNL7a\n666VquUle7zrVYOJKlv/2hgFjk1rhfD0JpqwKFaRTYyMqBRG7hXkPlPZPFJVeAft\ntvYLLJc5Iou4tvQvw3lB6F3g2jpzW4UQMXKklf3c0pZqYKCNYvEt7elnIyS/Aata\nFViP8384q9BMsSeoyj/mDfV4czbAwYgZN5ZRylM+IElGWNZVBydbBQaGJgj3yJD3\n3+2X3gSf7HN33p4dPCEeNBKnL0vBdS3GPkDibxHzKv5J3euds09QGtsK4BQ=\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{buckets,[{configs,[]}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{auto_failover_cfg,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575667472}}]},
{enabled,false},
{timeout,120},
{max_nodes,1},
{count,0}]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{dynamic_config_version,
[{'_vclock',[{'ns_1@127.0.0.1',{5,63575667474}}]},2,5]},
{uuid,
[{'_vclock',[{'ns_1@127.0.0.1',{1,63575671723}}]}|
<<"7470311bdaa2a4acd47d21222af5c9ae">>]}]
[error_logger:info,2014-08-19T16:49:02.571,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.17155.0>},
{name,ns_config},
{mfargs,
{ns_config,start_link,
["/opt/couchbase/etc/couchbase/config",
ns_config_default]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.17157.0>},
{name,ns_config_remote},
{mfargs,
{ns_config_replica,start_link,
[{local,ns_config_remote}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.17158.0>},
{name,ns_config_log},
{mfargs,{ns_config_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.17160.0>},
{name,cb_config_couch_sync},
{mfargs,{cb_config_couch_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.17152.0>},
{name,ns_config_sup},
{mfargs,{ns_config_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2014-08-19T16:49:02.572,ns_1@127.0.0.1:ns_server_sup<0.17163.0>:dir_size:start_link:47]Starting quick version of dir_size with program name: i386-linux-godu
[error_logger:info,2014-08-19T16:49:02.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.17162.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.572,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17164.0>},
{name,diag_handler_worker},
{mfa,{work_queue,start_link,[diag_handler_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.573,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17165.0>},
{name,dir_size},
{mfa,{dir_size,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.573,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17166.0>},
{name,request_throttler},
{mfa,{request_throttler,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.573,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17167.0>},
{name,ns_log},
{mfa,{ns_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.573,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17168.0>},
{name,ns_crash_log_consumer},
{mfa,{ns_log,start_link_crash_consumer,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.573,ns_1@127.0.0.1:ns_config_isasl_sync<0.17171.0>:ns_config_isasl_sync:init:63]isasl_sync init: ["/opt/couchbase/var/lib/couchbase/isasl.pw","_admin",
"f6126ae5fac44bf3d8316165791747f2"]
[error_logger:info,2014-08-19T16:49:02.574,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17169.0>},
{name,ns_config_ets_dup},
{mfa,{ns_config_ets_dup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_isasl_sync<0.17171.0>:ns_config_isasl_sync:init:71]isasl_sync init buckets: []
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_isasl_sync<0.17171.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/isasl.pw"
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[]}]
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cert_and_pkey ->
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3Qs"...>>,
<<"*****">>}
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[ns_server:debug,2014-08-19T16:49:02.574,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
memory_quota ->
58026
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,alkbqedpsntmtnxa}]
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:info,2014-08-19T16:49:02.575,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[ns_server:debug,2014-08-19T16:49:02.576,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]
[ns_server:debug,2014-08-19T16:49:02.576,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T16:49:02.576,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
uuid ->
<<"7470311bdaa2a4acd47d21222af5c9ae">>
[ns_server:debug,2014-08-19T16:49:02.576,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
vbucket_map_history ->
[]
[ns_server:debug,2014-08-19T16:49:02.576,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:02.578,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',membership} ->
active
[ns_server:debug,2014-08-19T16:49:02.579,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:02.579,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:02.579,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[error_logger:info,2014-08-19T16:49:02.580,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17171.0>},
{name,ns_config_isasl_sync},
{mfa,{ns_config_isasl_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_node_disco<0.17177.0>:ns_node_disco:init:103]Initting ns_node_disco with []
[error_logger:info,2014-08-19T16:49:02.580,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17174.0>},
{name,ns_log_events},
{mfa,{gen_event,start_link,[{local,ns_log_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[error_logger:info,2014-08-19T16:49:02.580,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17176.0>},
{name,ns_node_disco_events},
{mfargs,
{gen_event,start_link,
[{local,ns_node_disco_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:02.580,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:02.616,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:02.617,ns_1@127.0.0.1:<0.17178.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T16:49:02.617,ns_1@127.0.0.1:<0.17178.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T16:49:02.617,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17177.0>},
{name,ns_node_disco},
{mfargs,{ns_node_disco,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.617,ns_1@127.0.0.1:ns_config_rep<0.17183.0>:ns_config_rep:init:66]init pulling
[ns_server:debug,2014-08-19T16:49:02.617,ns_1@127.0.0.1:ns_config_rep<0.17183.0>:ns_config_rep:init:68]init pushing
[error_logger:info,2014-08-19T16:49:02.617,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17180.0>},
{name,ns_node_disco_log},
{mfargs,{ns_node_disco_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.617,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17181.0>},
{name,ns_node_disco_conf_events},
{mfargs,{ns_node_disco_conf_events,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.617,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17182.0>},
{name,ns_config_rep_merger},
{mfargs,{ns_config_rep,start_link_merger,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_rep<0.17183.0>:ns_config_rep:init:72]init reannouncing
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[error_logger:info,2014-08-19T16:49:02.619,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17183.0>},
{name,ns_config_rep},
{mfargs,{ns_config_rep,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[error_logger:info,2014-08-19T16:49:02.619,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17175.0>},
{name,ns_node_disco_sup},
{mfa,{ns_node_disco_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_rep<0.17183.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([alert_limits,auto_failover_cfg,autocompaction,
buckets,cert_and_pkey,cluster_compat_version,
drop_request_memory_threshold_mib,
dynamic_config_version,email_alerts,
fast_warmup,index_aware_rebalance_disabled,
max_bucket_count,memory_quota,nodes_wanted,otp,
remote_clusters,replication,
replication_topology,rest,rest_creds,
server_groups,set_view_update_daemon,uuid,
vbucket_map_history,
{couchdb,max_parallel_indexers},
{couchdb,max_parallel_replica_indexers},
{request_limit,capi},
{request_limit,rest},
{node,'ns_1@127.0.0.1',capi_port},
{node,'ns_1@127.0.0.1',compaction_daemon},
{node,'ns_1@127.0.0.1',config_version},
{node,'ns_1@127.0.0.1',isasl},
{node,'ns_1@127.0.0.1',membership},
{node,'ns_1@127.0.0.1',memcached},
{node,'ns_1@127.0.0.1',moxi},
{node,'ns_1@127.0.0.1',ns_log},
{node,'ns_1@127.0.0.1',port_servers},
{node,'ns_1@127.0.0.1',rest},
{node,'ns_1@127.0.0.1',ssl_capi_port},
{node,'ns_1@127.0.0.1',
ssl_proxy_downstream_port},
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
{node,'ns_1@127.0.0.1',ssl_rest_port}]..)
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[]}]
[error_logger:info,2014-08-19T16:49:02.619,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17186.0>},
{name,vbucket_map_mirror},
{mfa,{vbucket_map_mirror,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.619,ns_1@127.0.0.1:ns_log_events<0.17174.0>:ns_mail_log:init:44]ns_mail_log started up
[error_logger:info,2014-08-19T16:49:02.620,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17190.0>},
{name,bucket_info_cache},
{mfa,{bucket_info_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cert_and_pkey ->
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQPzPIoEQwCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmwlh6UM1HlSt78Xr7YCe\n18VU0sN62xbybSOxadjU2gF03Q2jgd+n84Tr9iGKtuy7DUKk/eJJQDQWcCDGTxYg\n8QNmzAlnX/eufV4rhr/9nlksMKdIlXWDvOdLX4yO1FIZ/QvGtoFWBwEc832n3sfa\n1f+EzMV8X6nZxMPV/Stc0StxJPY2Akqi99je3Qs"...>>,
<<"*****">>}
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.17201.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@127.0.0.1'},
{latest,"minute"}]}}}
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[error_logger:info,2014-08-19T16:49:02.620,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17193.0>},
{name,ns_tick_event},
{mfa,{gen_event,start_link,[{local,ns_tick_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[error_logger:info,2014-08-19T16:49:02.620,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17194.0>},
{name,buckets_events},
{mfa,{gen_event,start_link,[{local,buckets_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:02.620,ns_1@127.0.0.1:remote_clusters_info<0.17206.0>:remote_clusters_info:read_or_create_table:540]Reading remote_clusters_info content from /opt/couchbase/var/lib/couchbase/remote_clusters_cache_v3
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.17201.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1},
{proc_lib,init_p_do_apply,3}]}
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[error_logger:info,2014-08-19T16:49:02.620,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_mail_sup}
started: [{pid,<0.17196.0>},
{name,ns_mail_log},
{mfargs,{ns_mail_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[error_logger:info,2014-08-19T16:49:02.620,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17195.0>},
{name,ns_mail_sup},
{mfa,{ns_mail_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[error_logger:info,2014-08-19T16:49:02.620,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17197.0>},
{name,ns_stats_event},
{mfa,{gen_event,start_link,[{local,ns_stats_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.620,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
memory_quota ->
58026
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[error_logger:info,2014-08-19T16:49:02.621,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17198.0>},
{name,samples_loader_tasks},
{mfa,{samples_loader_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,alkbqedpsntmtnxa}]
[error_logger:info,2014-08-19T16:49:02.621,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17199.0>},
{name,ns_heart},
{mfa,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_server_sup<0.17163.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: []
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_server_sup<0.17163.0>:mb_master:check_master_takeover_needed:143]Got replies: []
[error_logger:info,2014-08-19T16:49:02.621,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17202.0>},
{name,ns_doctor},
{mfa,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_server_sup<0.17163.0>:mb_master:check_master_takeover_needed:149]Was unable to discover master, not going to force mastership takeover
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:info,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[error_logger:info,2014-08-19T16:49:02.621,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17206.0>},
{name,remote_clusters_info},
{mfa,{remote_clusters_info,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2014-08-19T16:49:02.621,ns_1@127.0.0.1:mb_master<0.17213.0>:mb_master:init:86]I'm the only node, so I'm the master.
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]
[ns_server:debug,2014-08-19T16:49:02.621,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
uuid ->
<<"7470311bdaa2a4acd47d21222af5c9ae">>
[error_logger:info,2014-08-19T16:49:02.621,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17211.0>},
{name,master_activity_events},
{mfa,
{gen_event,start_link,
[{local,master_activity_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
vbucket_map_history ->
[]
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.17201.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@127.0.0.1'},
{latest,"minute"}]}}}
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_rep<0.17183.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([dynamic_config_version]..)
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:mb_master_sup<0.17215.0>:misc:start_singleton:986]start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.17216.0> on 'ns_1@127.0.0.1'
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[error_logger:info,2014-08-19T16:49:02.622,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.17216.0>},
{name,ns_orchestrator},
{mfargs,{ns_orchestrator,start_link,[]}},
{restart_type,permanent},
{shutdown,20},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.17201.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1}]}
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:mb_master_sup<0.17215.0>:misc:start_singleton:986]start_singleton(gen_server, ns_tick, [], []): started as <0.17221.0> on 'ns_1@127.0.0.1'
[ns_server:debug,2014-08-19T16:49:02.622,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',config_version} ->
{2,3,0}
[error_logger:info,2014-08-19T16:49:02.622,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.17221.0>},
{name,ns_tick},
{mfargs,{ns_tick,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',membership} ->
active
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:<0.17224.0>:auto_failover:init:134]init auto_failover.
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:mb_master_sup<0.17215.0>:misc:start_singleton:986]start_singleton(gen_server, auto_failover, [], []): started as <0.17224.0> on 'ns_1@127.0.0.1'
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[error_logger:info,2014-08-19T16:49:02.623,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.17224.0>},
{name,auto_failover},
{mfargs,{auto_failover,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:02.623,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[error_logger:info,2014-08-19T16:49:02.623,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17213.0>},
{name,mb_master},
{mfa,{mb_master,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.623,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17225.0>},
{name,master_activity_events_ingress},
{mfa,
{gen_event,start_link,
[{local,master_activity_events_ingress}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.624,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17226.0>},
{name,master_activity_events_timestamper},
{mfa,
{master_activity_events,start_link_timestamper,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.624,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[error_logger:info,2014-08-19T16:49:02.624,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17227.0>},
{name,master_activity_events_pids_watcher},
{mfa,
{master_activity_events_pids_watcher,start_link,
[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.624,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:02.624,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:02.624,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:02.624,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:02.624,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:02.624,ns_1@127.0.0.1:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[error_logger:info,2014-08-19T16:49:02.637,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17228.0>},
{name,master_activity_events_keeper},
{mfa,{master_activity_events_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.665,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:02.665,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:02.665,ns_1@127.0.0.1:<0.17188.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T16:49:02.665,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:02.665,ns_1@127.0.0.1:<0.17188.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[error_logger:info,2014-08-19T16:49:02.693,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.17232.0>},
{name,ns_ssl_services_setup},
{mfargs,{ns_ssl_services_setup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.695,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.17234.0>},
{name,ns_rest_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_rest_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.697,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.17251.0>},
{name,ns_capi_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_capi_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.697,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17231.0>},
{name,ns_ssl_services_sup},
{mfargs,{ns_ssl_services_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.697,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17268.0>},
{name,menelaus_ui_auth},
{mfargs,{menelaus_ui_auth,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.697,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17269.0>},
{name,menelaus_web_cache},
{mfargs,{menelaus_web_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.697,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17270.0>},
{name,menelaus_stats_gatherer},
{mfargs,{menelaus_stats_gatherer,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17271.0>},
{name,menelaus_web},
{mfargs,{menelaus_web,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17288.0>},
{name,menelaus_event},
{mfargs,{menelaus_event,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17289.0>},
{name,hot_keys_keeper},
{mfargs,{hot_keys_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[user:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:ns_server_sup<0.17163.0>:menelaus_sup:start_link:44]Couchbase Server has started on web port 8091 on node 'ns_1@127.0.0.1'.
[error_logger:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17290.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17230.0>},
{name,menelaus},
{mfa,{menelaus_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:<0.17294.0>:mc_tcp_listener:init:24]mccouch is listening on port 11213
[error_logger:info,2014-08-19T16:49:02.698,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.17292.0>},
{name,mc_couch_events},
{mfargs,
{gen_event,start_link,[{local,mc_couch_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.699,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.17293.0>},
{name,mc_conn_sup},
{mfargs,{mc_conn_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.699,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.17294.0>},
{name,mc_tcp_listener},
{mfargs,{mc_tcp_listener,start_link,[11213]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:02.699,ns_1@127.0.0.1:<0.17298.0>:ns_memcached_log_rotator:init:28]Starting log rotator on "/opt/couchbase/var/lib/couchbase/logs"/"memcached.log"* with an initial period of 39003ms
[error_logger:info,2014-08-19T16:49:02.699,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17291.0>},
{name,mc_sup},
{mfa,{mc_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.699,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17295.0>},
{name,ns_ports_setup},
{mfa,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.700,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17296.0>},
{name,ns_port_memcached_killer},
{mfa,{ns_ports_setup,start_memcached_force_killer,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.700,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17298.0>},
{name,ns_memcached_log_rotator},
{mfa,{ns_memcached_log_rotator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.700,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17300.0>},
{name,memcached_clients_pool},
{mfa,{memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.700,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17301.0>},
{name,proxied_memcached_clients_pool},
{mfa,{proxied_memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.701,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17302.0>},
{name,xdc_lhttpc_pool},
{mfa,
{lhttpc_manager,start_link,
[[{name,xdc_lhttpc_pool},
{connection_timeout,120000},
{pool_size,200}]]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.701,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17303.0>},
{name,ns_null_connection_pool},
{mfa,
{ns_null_connection_pool,start_link,
[ns_null_connection_pool]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.701,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17304.0>},
{name,xdc_replication_sup},
{mfa,{xdc_replication_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.701,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17305.0>},
{name,xdc_rep_manager},
{mfa,{xdc_rep_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,30000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.702,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17307.0>},
{name,ns_memcached_sockets_pool},
{mfa,{ns_memcached_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.702,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.17310.0>},
{name,ns_bucket_worker},
{mfargs,{work_queue,start_link,[ns_bucket_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.702,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.17312.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.702,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.17311.0>},
{name,ns_bucket_sup},
{mfargs,{ns_bucket_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.702,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17308.0>},
{name,ns_bucket_worker_sup},
{mfa,{ns_bucket_worker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:02.703,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17313.0>},
{name,system_stats_collector},
{mfa,{system_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.703,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17316.0>},
{name,{stats_archiver,"@system"}},
{mfa,{stats_archiver,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.703,ns_1@127.0.0.1:compaction_daemon<0.17319.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:49:02.703,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.17321.0>:xdc_rdoc_replication_srv:init:76]Loaded the following docs:
[]
[error_logger:info,2014-08-19T16:49:02.703,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17318.0>},
{name,{stats_reader,"@system"}},
{mfa,{stats_reader,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.703,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.17321.0>:xdc_rdoc_replication_srv:handle_info:154]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:02.703,ns_1@127.0.0.1:compaction_daemon<0.17319.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:info,2014-08-19T16:49:02.704,ns_1@127.0.0.1:set_view_update_daemon<0.17323.0>:set_view_update_daemon:init:50]Set view update daemon, starting with the following settings:
update interval: 5000ms
minimum number of changes: 5000
[error_logger:info,2014-08-19T16:49:02.704,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17319.0>},
{name,compaction_daemon},
{mfa,{compaction_daemon,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.704,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17321.0>},
{name,xdc_rdoc_replication_srv},
{mfa,{xdc_rdoc_replication_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.704,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17323.0>},
{name,set_view_update_daemon},
{mfa,{set_view_update_daemon,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.704,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.17163.0>},
{name,ns_server_sup},
{mfargs,{ns_server_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[user:info,2014-08-19T16:49:02.705,ns_1@127.0.0.1:<0.17168.0>:ns_log:crash_consumption_loop:64]Port server moxi on node 'babysitter_of_ns_1@127.0.0.1' exited with status 0. Restarting. Messages: WARNING: curl error: transfer closed with outstanding read data remaining from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
WARNING: curl error: couldn't connect to host from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
ERROR: could not contact REST server(s): http://127.0.0.1:8091/pools/default/saslBucketsStreaming
WARNING: curl error: couldn't connect to host from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
ERROR: could not contact REST server(s): http://127.0.0.1:8091/pools/default/saslBucketsStreaming
WARNING: curl error: couldn't connect to host from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
EOL on stdin. Exiting
[ns_server:debug,2014-08-19T16:49:02.709,ns_1@127.0.0.1:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:02.709,ns_1@127.0.0.1:<0.17189.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T16:49:02.709,ns_1@127.0.0.1:<0.17189.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: alkbqedpsntmtnxa
[cluster:info,2014-08-19T16:49:02.713,ns_1@127.0.0.1:ns_cluster<0.17151.0>:ns_cluster:handle_call:171]Changing address to "127.0.0.1" due to client request
[cluster:debug,2014-08-19T16:49:02.768,ns_1@127.0.0.1:ns_cluster<0.17151.0>:ns_cluster:handle_call:159]handling engage_cluster([{<<"requestedTargetNodeHostname">>,
<<"10.242.238.90">>},
{<<"availableStorage">>,
{struct,
[{<<"hdd">>,
[{struct,
[{<<"path">>,<<"/">>},
{<<"sizeKBytes">>,103212320},
{<<"usagePercent">>,3}]},
{struct,
[{<<"path">>,<<"/dev/shm">>},
{<<"sizeKBytes">>,49515824},
{<<"usagePercent">>,0}]},
{struct,
[{<<"path">>,<<"/boot">>},
{<<"sizeKBytes">>,198337},
{<<"usagePercent">>,17}]},
{struct,
[{<<"path">>,<<"/data">>},
{<<"sizeKBytes">>,329573012},
{<<"usagePercent">>,1}]},
{struct,
[{<<"path">>,<<"/test">>},
{<<"sizeKBytes">>,528447160},
{<<"usagePercent">>,1}]},
{struct,
[{<<"path">>,<<"/var/lib/pgsql">>},
{<<"sizeKBytes">>,1922866992},
{<<"usagePercent">>,1}]}]}]}},
{<<"memoryQuota">>,90112},
{<<"storageTotals">>,
{struct,
[{<<"ram">>,
{struct,
[{<<"total">>,101408407552},
{<<"quotaTotal">>,94489280512},
{<<"quotaUsed">>,13369344000},
{<<"used">>,13174808576},
{<<"usedByData">>,31847576}]}},
{<<"hdd">>,
{struct,
[{<<"total">>,1969015799808},
{<<"quotaTotal">>,1969015799808},
{<<"used">>,19690157998},
{<<"usedByData">>,2736915},
{<<"free">>,1949325641810}]}}]}},
{<<"storage">>,
{struct,
[{<<"ssd">>,[]},
{<<"hdd">>,
[{struct,
[{<<"path">>,<<"/var/lib/pgsql">>},
{<<"index_path">>,<<"/var/lib/pgsql">>},
{<<"quotaMb">>,<<"none">>},
{<<"state">>,<<"ok">>}]}]}]}},
{<<"systemStats">>,
{struct,
[{<<"cpu_utilization_rate">>,0.6265664160401002},
{<<"swap_total">>,0},
{<<"swap_used">>,0},
{<<"mem_total">>,101408407552},
{<<"mem_free">>,89866596352}]}},
{<<"interestingStats">>,
{struct,
[{<<"cmd_get">>,0.0},
{<<"couch_docs_actual_disk_size">>,2736915},
{<<"couch_docs_data_size">>,2729956},
{<<"couch_views_actual_disk_size">>,0},
{<<"couch_views_data_size">>,0},
{<<"curr_items">>,0},
{<<"curr_items_tot">>,0},
{<<"ep_bg_fetched">>,0.0},
{<<"get_hits">>,0.0},
{<<"mem_used">>,31847576},
{<<"ops">>,0.0},
{<<"vb_replica_curr_items">>,0}]}},
{<<"uptime">>,<<"4088">>},
{<<"memoryTotal">>,101408407552},
{<<"memoryFree">>,89866596352},
{<<"mcdMemoryReserved">>,77368},
{<<"mcdMemoryAllocated">>,77368},
{<<"couchApiBase">>,<<"http://10.242.238.88:8092/">>},
{<<"otpCookie">>,<<"xyzevwdfypcplvpp">>},
{<<"clusterMembership">>,<<"active">>},
{<<"status">>,<<"healthy">>},
{<<"otpNode">>,<<"ns_1@10.242.238.88">>},
{<<"thisNode">>,true},
{<<"hostname">>,<<"10.242.238.88:8091">>},
{<<"clusterCompatibility">>,131077},
{<<"version">>,<<"2.5.1-1083-rel-enterprise">>},
{<<"os">>,<<"x86_64-unknown-linux-gnu">>},
{<<"ports">>,
{struct,
[{<<"httpsMgmt">>,18091},
{<<"httpsCAPI">>,18092},
{<<"sslProxy">>,11214},
{<<"proxy">>,11211},
{<<"direct">>,11210}]}}])
[cluster:info,2014-08-19T16:49:02.770,ns_1@127.0.0.1:ns_cluster<0.17151.0>:ns_cluster:do_change_address:398]Decided to change address to "10.242.238.90"
[ns_server:debug,2014-08-19T16:49:02.770,ns_1@127.0.0.1:<0.17297.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17295.0>} exited with reason noconnection
[user:warn,2014-08-19T16:49:02.770,nonode@nohost:ns_node_disco<0.17177.0>:ns_node_disco:handle_info:165]Node nonode@nohost saw that node 'ns_1@127.0.0.1' went down. Details: [{nodedown_reason,
net_kernel_terminated}]
[error_logger:info,2014-08-19T16:49:02.770,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17331.0>},
{name,ns_ports_setup},
{mfa,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:02.771,nonode@nohost:dist_manager<0.267.0>:dist_manager:do_adjust_address:249]Adjusted IP to "10.242.238.90"
[ns_server:info,2014-08-19T16:49:02.771,nonode@nohost:dist_manager<0.267.0>:dist_manager:bringup:230]Attempting to bring up net_kernel with name 'ns_1@10.242.238.90'
[error_logger:info,2014-08-19T16:49:02.771,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.17334.0>},
{name,erl_epmd},
{mfargs,{erl_epmd,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.771,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.17335.0>},
{name,auth},
{mfargs,{auth,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[user:info,2014-08-19T16:49:02.772,ns_1@10.242.238.90:ns_node_disco<0.17177.0>:ns_node_disco:handle_info:159]Node 'ns_1@10.242.238.90' saw that node 'ns_1@10.242.238.90' came up. Tags: []
[ns_server:debug,2014-08-19T16:49:02.772,ns_1@10.242.238.90:<0.17322.0>:xdc_rdoc_replication_srv:nodeup_monitoring_loop:46]got nodeup event. Considering rdocs replication
[error_logger:info,2014-08-19T16:49:02.772,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.17336.0>},
{name,net_kernel},
{mfargs,
{net_kernel,start_link,
[['ns_1@10.242.238.90',longnames]]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:02.772,ns_1@10.242.238.90:dist_manager<0.267.0>:dist_manager:save_node:143]saving node to "/opt/couchbase/var/lib/couchbase/couchbase-server.node"
[ns_server:debug,2014-08-19T16:49:02.772,ns_1@10.242.238.90:xdc_rdoc_replication_srv<0.17321.0>:xdc_rdoc_replication_srv:handle_info:154]doing replicate_newnodes_docs
[error_logger:info,2014-08-19T16:49:02.772,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_sup}
started: [{pid,<0.17333.0>},
{name,net_sup_dynamic},
{mfargs,
{erl_distribution,start_link,
[['ns_1@10.242.238.90',longnames]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[ns_server:warn,2014-08-19T16:49:02.773,ns_1@10.242.238.90:xdc_rdoc_replication_srv<0.17321.0>:xdc_rdoc_replication_srv:handle_info:150]Remote server node {xdc_rdoc_replication_srv,'ns_1@127.0.0.1'} process down: noconnection
[ns_server:debug,2014-08-19T16:49:02.801,ns_1@10.242.238.90:dist_manager<0.267.0>:dist_manager:bringup:238]Attempted to save node name to disk: ok
[ns_server:info,2014-08-19T16:49:02.801,ns_1@10.242.238.90:dist_manager<0.267.0>:dist_manager:do_adjust_address:253]Re-setting cookie {alkbqedpsntmtnxa,'ns_1@10.242.238.90'}
[ns_server:info,2014-08-19T16:49:02.801,ns_1@10.242.238.90:dist_manager<0.267.0>:dist_manager:save_address_config:138]Deleting irrelevant ip file "/opt/couchbase/var/lib/couchbase/ip": ok
[ns_server:info,2014-08-19T16:49:02.802,ns_1@10.242.238.90:dist_manager<0.267.0>:dist_manager:save_address_config:139]saving ip config to "/opt/couchbase/var/lib/couchbase/ip_start"
[ns_server:info,2014-08-19T16:49:02.835,ns_1@10.242.238.90:dist_manager<0.267.0>:dist_manager:do_adjust_address:260]Persisted the address successfully
[cluster:debug,2014-08-19T16:49:02.835,ns_1@10.242.238.90:<0.17329.0>:ns_cluster:maybe_rename:431]Renaming node from 'ns_1@127.0.0.1' to 'ns_1@10.242.238.90'.
[cluster:debug,2014-08-19T16:49:02.835,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf nodes_wanted -> nodes_wanted:
['ns_1@127.0.0.1'] ->
['ns_1@10.242.238.90']
[cluster:debug,2014-08-19T16:49:02.835,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf server_groups -> server_groups:
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]] ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@10.242.238.90']}]]
[cluster:debug,2014-08-19T16:49:02.835,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',capi_port} -> {node,
'ns_1@10.242.238.90',
capi_port}:
8092 ->
8092
[cluster:debug,2014-08-19T16:49:02.835,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',compaction_daemon} -> {node,
'ns_1@10.242.238.90',
compaction_daemon}:
[{check_interval,30},{min_file_size,131072}] ->
[{check_interval,30},{min_file_size,131072}]
[cluster:debug,2014-08-19T16:49:02.838,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',config_version} -> {node,
'ns_1@10.242.238.90',
config_version}:
{2,3,0} ->
{2,3,0}
[cluster:debug,2014-08-19T16:49:02.838,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',isasl} -> {node,
'ns_1@10.242.238.90',
isasl}:
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}] ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[cluster:debug,2014-08-19T16:49:02.838,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',membership} -> {node,
'ns_1@10.242.238.90',
membership}:
active ->
active
[cluster:debug,2014-08-19T16:49:02.838,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',memcached} -> {node,
'ns_1@10.242.238.90',
memcached}:
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}] ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[cluster:debug,2014-08-19T16:49:02.839,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',moxi} -> {node,
'ns_1@10.242.238.90',moxi}:
[{port,11211},{verbosity,[]}] ->
[{port,11211},{verbosity,[]}]
[cluster:debug,2014-08-19T16:49:02.839,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',ns_log} -> {node,
'ns_1@10.242.238.90',
ns_log}:
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}] ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[cluster:debug,2014-08-19T16:49:02.839,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',port_servers} -> {node,
'ns_1@10.242.238.90',
port_servers}:
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}] ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[cluster:debug,2014-08-19T16:49:02.840,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',rest} -> {node,
'ns_1@10.242.238.90',rest}:
[{port,8091},{port_meta,global}] ->
[{port,8091},{port_meta,global}]
[cluster:debug,2014-08-19T16:49:02.840,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',ssl_capi_port} -> {node,
'ns_1@10.242.238.90',
ssl_capi_port}:
18092 ->
18092
[cluster:debug,2014-08-19T16:49:02.840,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} -> {node,
'ns_1@10.242.238.90',
ssl_proxy_downstream_port}:
11214 ->
11214
[cluster:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} -> {node,
'ns_1@10.242.238.90',
ssl_proxy_upstream_port}:
11215 ->
11215
[cluster:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config<0.17155.0>:ns_cluster:rename_node_in_config:443]renaming node conf {node,'ns_1@127.0.0.1',ssl_rest_port} -> {node,
'ns_1@10.242.238.90',
ssl_rest_port}:
18091 ->
18091
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config_rep<0.17183.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([nodes_wanted,server_groups,
{node,'ns_1@10.242.238.90',capi_port},
{node,'ns_1@10.242.238.90',compaction_daemon},
{node,'ns_1@10.242.238.90',config_version},
{node,'ns_1@10.242.238.90',isasl},
{node,'ns_1@10.242.238.90',membership},
{node,'ns_1@10.242.238.90',memcached},
{node,'ns_1@10.242.238.90',moxi},
{node,'ns_1@10.242.238.90',ns_log},
{node,'ns_1@10.242.238.90',port_servers},
{node,'ns_1@10.242.238.90',rest},
{node,'ns_1@10.242.238.90',ssl_capi_port},
{node,'ns_1@10.242.238.90',
ssl_proxy_downstream_port},
{node,'ns_1@10.242.238.90',
ssl_proxy_upstream_port},
{node,'ns_1@10.242.238.90',ssl_rest_port}]..)
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:mb_master<0.17213.0>:mb_master:update_peers:506]List of peers has changed from ['ns_1@127.0.0.1'] to ['ns_1@10.242.238.90']
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:02.841,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:02.842,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:02.842,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:02.844,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:02.844,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:02.844,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:02.844,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',membership} ->
active
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_node_disco_events<0.17176.0>:ns_node_disco_rep_events:handle_event:42]Detected new nodes (['ns_1@10.242.238.90']). Moving config around.
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:<0.17204.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17202.0>} exited with reason shutdown
[ns_server:info,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_node_disco_events<0.17176.0>:ns_node_disco_log:handle_event:46]ns_node_disco_log: nodes changed: ['ns_1@10.242.238.90']
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',config_version} ->
{2,3,0}
[ns_server:info,2014-08-19T16:49:02.845,ns_1@10.242.238.90:mb_master<0.17213.0>:mb_master:terminate:299]Synchronously shutting down child mb_master_sup
[error_logger:info,2014-08-19T16:49:02.845,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17355.0>},
{name,ns_doctor},
{mfa,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:<0.17214.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17213.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_server_sup<0.17163.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: []
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@10.242.238.90']}]]
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_server_sup<0.17163.0>:mb_master:check_master_takeover_needed:143]Got replies: []
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@10.242.238.90']
[ns_server:debug,2014-08-19T16:49:02.845,ns_1@10.242.238.90:ns_server_sup<0.17163.0>:mb_master:check_master_takeover_needed:149]Was unable to discover master, not going to force mastership takeover
[user:info,2014-08-19T16:49:02.845,ns_1@10.242.238.90:mb_master<0.17362.0>:mb_master:init:86]I'm the only node, so I'm the master.
[ns_server:info,2014-08-19T16:49:02.846,ns_1@10.242.238.90:ns_log<0.17167.0>:ns_log:handle_cast:183]suppressing duplicate log mb_master:undefined([<<"I'm the only node, so I'm the master.">>]) because it's been seen 1 time in the past 0.224171 secs (last seen 0.224171 secs ago)
[ns_server:debug,2014-08-19T16:49:02.846,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:02.846,ns_1@10.242.238.90:ns_config_rep<0.17183.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([dynamic_config_version]..)
[ns_server:debug,2014-08-19T16:49:02.846,ns_1@10.242.238.90:mb_master_sup<0.17364.0>:misc:start_singleton:986]start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.17365.0> on 'ns_1@10.242.238.90'
[error_logger:info,2014-08-19T16:49:02.846,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.17365.0>},
{name,ns_orchestrator},
{mfargs,{ns_orchestrator,start_link,[]}},
{restart_type,permanent},
{shutdown,20},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.847,ns_1@10.242.238.90:mb_master_sup<0.17364.0>:misc:start_singleton:986]start_singleton(gen_server, ns_tick, [], []): started as <0.17367.0> on 'ns_1@10.242.238.90'
[error_logger:info,2014-08-19T16:49:02.847,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.17367.0>},
{name,ns_tick},
{mfargs,{ns_tick,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:02.847,ns_1@10.242.238.90:<0.17368.0>:auto_failover:init:134]init auto_failover.
[ns_server:debug,2014-08-19T16:49:02.847,ns_1@10.242.238.90:mb_master_sup<0.17364.0>:misc:start_singleton:986]start_singleton(gen_server, auto_failover, [], []): started as <0.17368.0> on 'ns_1@10.242.238.90'
[cluster:info,2014-08-19T16:49:02.847,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:do_change_address:404]Renamed node. New name is 'ns_1@10.242.238.90'.
[error_logger:info,2014-08-19T16:49:02.847,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.17368.0>},
{name,auto_failover},
{mfargs,{auto_failover,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:02.847,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17362.0>},
{name,mb_master},
{mfa,{mb_master,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[user:info,2014-08-19T16:49:02.895,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_init:86]Initial otp cookie generated: nntvfgasfojamdnn
[ns_server:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:<0.17351.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.90'], with cookie: alkbqedpsntmtnxa
[ns_server:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[cluster:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:handle_call:161]engage_cluster(..) -> {ok,ok}
[ns_server:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:ns_config_rep<0.17183.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([otp]..)
[ns_server:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:<0.17351.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.90'], with cookie: nntvfgasfojamdnn
[ns_server:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,nntvfgasfojamdnn}]
[ns_server:debug,2014-08-19T16:49:02.895,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:02.896,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[cluster:debug,2014-08-19T16:49:02.906,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:handle_call:165]handling complete_join([{<<"targetNode">>,<<"ns_1@10.242.238.90">>},
{<<"availableStorage">>,
{struct,
[{<<"hdd">>,
[{struct,
[{<<"path">>,<<"/">>},
{<<"sizeKBytes">>,103212320},
{<<"usagePercent">>,3}]},
{struct,
[{<<"path">>,<<"/dev/shm">>},
{<<"sizeKBytes">>,49515824},
{<<"usagePercent">>,0}]},
{struct,
[{<<"path">>,<<"/boot">>},
{<<"sizeKBytes">>,198337},
{<<"usagePercent">>,17}]},
{struct,
[{<<"path">>,<<"/data">>},
{<<"sizeKBytes">>,329573012},
{<<"usagePercent">>,1}]},
{struct,
[{<<"path">>,<<"/test">>},
{<<"sizeKBytes">>,528447160},
{<<"usagePercent">>,1}]},
{struct,
[{<<"path">>,<<"/var/lib/pgsql">>},
{<<"sizeKBytes">>,1922866992},
{<<"usagePercent">>,1}]}]}]}},
{<<"memoryQuota">>,90112},
{<<"storageTotals">>,
{struct,
[{<<"ram">>,
{struct,
[{<<"total">>,101408407552},
{<<"quotaTotal">>,94489280512},
{<<"quotaUsed">>,13369344000},
{<<"used">>,13174808576},
{<<"usedByData">>,31847576}]}},
{<<"hdd">>,
{struct,
[{<<"total">>,1969015799808},
{<<"quotaTotal">>,1969015799808},
{<<"used">>,19690157998},
{<<"usedByData">>,2736915},
{<<"free">>,1949325641810}]}}]}},
{<<"storage">>,
{struct,
[{<<"ssd">>,[]},
{<<"hdd">>,
[{struct,
[{<<"path">>,<<"/var/lib/pgsql">>},
{<<"index_path">>,<<"/var/lib/pgsql">>},
{<<"quotaMb">>,<<"none">>},
{<<"state">>,<<"ok">>}]}]}]}},
{<<"systemStats">>,
{struct,
[{<<"cpu_utilization_rate">>,0.6265664160401002},
{<<"swap_total">>,0},
{<<"swap_used">>,0},
{<<"mem_total">>,101408407552},
{<<"mem_free">>,89866596352}]}},
{<<"interestingStats">>,
{struct,
[{<<"cmd_get">>,0.0},
{<<"couch_docs_actual_disk_size">>,2736915},
{<<"couch_docs_data_size">>,2729956},
{<<"couch_views_actual_disk_size">>,0},
{<<"couch_views_data_size">>,0},
{<<"curr_items">>,0},
{<<"curr_items_tot">>,0},
{<<"ep_bg_fetched">>,0.0},
{<<"get_hits">>,0.0},
{<<"mem_used">>,31847576},
{<<"ops">>,0.0},
{<<"vb_replica_curr_items">>,0}]}},
{<<"uptime">>,<<"4088">>},
{<<"memoryTotal">>,101408407552},
{<<"memoryFree">>,89866596352},
{<<"mcdMemoryReserved">>,77368},
{<<"mcdMemoryAllocated">>,77368},
{<<"couchApiBase">>,<<"http://10.242.238.88:8092/">>},
{<<"otpCookie">>,<<"xyzevwdfypcplvpp">>},
{<<"clusterMembership">>,<<"active">>},
{<<"status">>,<<"healthy">>},
{<<"otpNode">>,<<"ns_1@10.242.238.88">>},
{<<"thisNode">>,true},
{<<"hostname">>,<<"10.242.238.88:8091">>},
{<<"clusterCompatibility">>,131077},
{<<"version">>,<<"2.5.1-1083-rel-enterprise">>},
{<<"os">>,<<"x86_64-unknown-linux-gnu">>},
{<<"ports">>,
{struct,
[{<<"httpsMgmt">>,18091},
{<<"httpsCAPI">>,18092},
{<<"sslProxy">>,11214},
{<<"proxy">>,11211},
{<<"direct">>,11210}]}}])
[user:info,2014-08-19T16:49:02.908,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:perform_actual_join:897]Node 'ns_1@10.242.238.90' is joining cluster via node 'ns_1@10.242.238.88'.
[ns_server:debug,2014-08-19T16:49:02.908,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
i_am_a_dead_man ->
true
[ns_server:debug,2014-08-19T16:49:02.908,ns_1@10.242.238.90:<0.17324.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17323.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:02.908,ns_1@10.242.238.90:ns_config_rep<0.17183.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([i_am_a_dead_man]..)
[ns_server:debug,2014-08-19T16:49:02.908,ns_1@10.242.238.90:<0.17320.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17319.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:02.945,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:02.945,ns_1@10.242.238.90:<0.17370.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.90'], with cookie: nntvfgasfojamdnn
[ns_server:debug,2014-08-19T16:49:02.945,ns_1@10.242.238.90:<0.17370.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.90'], with cookie: nntvfgasfojamdnn
[error_logger:error,2014-08-19T16:49:02.953,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_msg:119]** Connection attempt from disallowed node 'ns_1@10.242.238.88' **
[error_logger:error,2014-08-19T16:49:02.971,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_msg:119]** Connection attempt from disallowed node 'ns_1@10.242.238.89' **
[ns_server:debug,2014-08-19T16:49:03.190,ns_1@10.242.238.90:<0.17317.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_stats_event,<0.17316.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:03.191,ns_1@10.242.238.90:<0.17315.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_tick_event,<0.17313.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:03.191,ns_1@10.242.238.90:<0.17312.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17311.0>} exited with reason shutdown
[error_logger:error,2014-08-19T16:49:03.191,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_bucket_sup}
Context: shutdown_error
Reason: normal
Offender: [{pid,<0.17312.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.191,ns_1@10.242.238.90:<0.17332.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17331.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:49:03.191,ns_1@10.242.238.90:<0.17299.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17296.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:49:03.192,ns_1@10.242.238.90:<0.17233.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17232.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:03.192,ns_1@10.242.238.90:<0.17229.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {master_activity_events,<0.17228.0>} exited with reason killed
[ns_server:info,2014-08-19T16:49:03.192,ns_1@10.242.238.90:mb_master<0.17362.0>:mb_master:terminate:299]Synchronously shutting down child mb_master_sup
[ns_server:debug,2014-08-19T16:49:03.192,ns_1@10.242.238.90:<0.17363.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17362.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:03.192,ns_1@10.242.238.90:<0.17356.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17355.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:03.192,ns_1@10.242.238.90:<0.17200.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {buckets_events,<0.17199.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:03.193,ns_1@10.242.238.90:<0.17192.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17190.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:49:03.193,ns_1@10.242.238.90:<0.17187.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17186.0>} exited with reason killed
[ns_server:debug,2014-08-19T16:49:03.193,ns_1@10.242.238.90:<0.17184.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events_local,<0.17183.0>} exited with reason shutdown
[ns_server:debug,2014-08-19T16:49:03.193,ns_1@10.242.238.90:<0.17172.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17171.0>} exited with reason shutdown
[error_logger:error,2014-08-19T16:49:03.193,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================CRASH REPORT=========================
crasher:
initial call: gen_event:init_it/6
pid: <0.17191.0>
registered_name: bucket_info_cache_invalidations
exception exit: killed
in function gen_event:terminate_server/4
ancestors: [bucket_info_cache,ns_server_sup,ns_server_cluster_sup,
<0.58.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 233
stack_size: 24
reductions: 133
neighbours:
[ns_server:debug,2014-08-19T16:49:03.193,ns_1@10.242.238.90:<0.17170.0>:ns_pubsub:do_subscribe_link:136]Parent process of subscription {ns_config_events,<0.17169.0>} exited with reason killed
[cluster:debug,2014-08-19T16:49:03.195,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:perform_actual_join:905]ns_cluster: joining cluster. Child has exited.
[cluster:debug,2014-08-19T16:49:03.199,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:perform_actual_join:908]Deleted _replicator db: ok.
[ns_server:debug,2014-08-19T16:49:03.199,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,xyzevwdfypcplvpp}]
[ns_server:debug,2014-08-19T16:49:03.200,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@10.242.238.90','ns_1@10.242.238.88']
[ns_server:debug,2014-08-19T16:49:03.200,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
undefined
[cluster:debug,2014-08-19T16:49:03.200,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:perform_actual_join:927]pre-join cleaned config is:
{config,{},
[[],
[{directory,"/opt/couchbase/var/lib/couchbase/config"},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{fast_warmup,
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{check_interval,30},{min_file_size,131072}]},
{nodes_wanted,['ns_1@127.0.0.1']},
{{node,'ns_1@127.0.0.1',membership},active},
{rest,[{port,8091}]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{node,'ns_1@127.0.0.1',rest},[{port,8091},{port_meta,global}]},
{{node,'ns_1@127.0.0.1',ssl_rest_port},18091},
{{node,'ns_1@127.0.0.1',capi_port},8092},
{{node,'ns_1@127.0.0.1',ssl_capi_port},18092},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},11214},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},11215},
{rest_creds,[{creds,[]}]},
{remote_clusters,[]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"2476fca958629f521e96d4f683277efd">>,{1,63575671742}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"2476fca958629f521e96d4f683277efd">>,{1,63575671742}}]},
{port,11210},
{mccouch_port,11213},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,[]},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{verbosity,[]}]},
{memory_quota,58026},
{buckets,[{configs,[]}]},
{{node,'ns_1@127.0.0.1',moxi},[{port,11211},{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",
{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,
stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"2476fca958629f521e96d4f683277efd">>,{1,63575671742}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},
{pass,[]},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,
auto_failover_cluster_too_small,ip,disk,overhead,
ep_oom_errors,ep_item_commit_failed]}]},
{alert_limits,[{max_overhead_perc,50},{max_disk_used,90}]},
{replication,[{enabled,true}]},
{auto_failover_cfg,
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{{request_limit,rest},undefined},
{{request_limit,capi},undefined},
{drop_request_memory_threshold_mib,undefined},
{replication_topology,star}]],
[[{cluster_compat_version,undefined},
{nodes_wanted,['ns_1@10.242.238.90','ns_1@10.242.238.88']},
{otp,
[{'_vclock',
[{'ns_1@10.242.238.90',{2,63575671743}},
{'ns_1@127.0.0.1',{1,63575667473}}]},
{cookie,xyzevwdfypcplvpp}]},
{{node,'ns_1@10.242.238.90',capi_port},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]}|8092]},
{{node,'ns_1@10.242.238.90',compaction_daemon},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]},
{check_interval,30},
{min_file_size,131072}]},
{{node,'ns_1@10.242.238.90',config_version},
[{'_vclock',
[{'ns_1@10.242.238.90',{1,63575671742}},
{'ns_1@127.0.0.1',{7,63575667472}}]}|
{2,3,0}]},
{{node,'ns_1@10.242.238.90',isasl},
[{'_vclock',
[{'ns_1@10.242.238.90',{1,63575671742}},
{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@10.242.238.90',memcached},
[{'_vclock',
[{'ns_1@10.242.238.90',{1,63575671742}},
{'ns_1@127.0.0.1',{3,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"f6126ae5fac44bf3d8316165791747f2"},
{verbosity,[]}]},
{{node,'ns_1@10.242.238.90',moxi},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]},
{port,11211},
{verbosity,[]}]},
{{node,'ns_1@10.242.238.90',ns_log},
[{'_vclock',
[{'ns_1@10.242.238.90',{1,63575671742}},
{'ns_1@127.0.0.1',{1,63575667472}},
{<<"c3a87fe2e8c58375a03730a71fdf48a8">>,{1,63575667472}}]},
{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@10.242.238.90',port_servers},
[{'_vclock',
[{'ns_1@10.242.238.90',{1,63575671742}},
{'ns_1@127.0.0.1',{3,63575667472}}]},
{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",
{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,
stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,
[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]},
{{node,'ns_1@10.242.238.90',rest},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]},
{port,8091},
{port_meta,global}]},
{{node,'ns_1@10.242.238.90',ssl_capi_port},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]}|18092]},
{{node,'ns_1@10.242.238.90',ssl_proxy_downstream_port},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]}|11214]},
{{node,'ns_1@10.242.238.90',ssl_proxy_upstream_port},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]}|11215]},
{{node,'ns_1@10.242.238.90',ssl_rest_port},
[{'_vclock',[{'ns_1@10.242.238.90',{1,63575671742}}]}|18091]}]],
ns_config_default}
[ns_server:debug,2014-08-19T16:49:03.202,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[user:info,2014-08-19T16:49:03.202,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:130]Node 'ns_1@10.242.238.90' synchronized otp cookie xyzevwdfypcplvpp from cluster
[ns_server:debug,2014-08-19T16:49:03.202,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:03.263,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[cluster:debug,2014-08-19T16:49:03.265,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:perform_actual_join:931]Connection from 'ns_1@10.242.238.90' to 'ns_1@10.242.238.88': true
[cluster:debug,2014-08-19T16:49:03.265,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:perform_actual_join:943]Join status: {ok,ok}, starting ns_server_cluster back
[ns_server:info,2014-08-19T16:49:03.266,ns_1@10.242.238.90:ns_server_sup<0.17392.0>:dir_size:start_link:47]Starting quick version of dir_size with program name: i386-linux-godu
[error_logger:info,2014-08-19T16:49:03.266,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17393.0>},
{name,diag_handler_worker},
{mfa,{work_queue,start_link,[diag_handler_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.266,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17394.0>},
{name,dir_size},
{mfa,{dir_size,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.266,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17395.0>},
{name,request_throttler},
{mfa,{request_throttler,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:warn,2014-08-19T16:49:03.267,ns_1@10.242.238.90:ns_log<0.17396.0>:ns_log:read_logs:123]Couldn't load logs from "/opt/couchbase/var/lib/couchbase/ns_log" (perhaps it's first startup): {error,
enoent}
[ns_doctor:error,2014-08-19T16:49:03.267,ns_1@10.242.238.90:ns_log<0.17396.0>:ns_doctor:get_node:195]Error attempting to get node 'ns_1@10.242.238.88': {exit,
{noproc,
{gen_server,call,
[ns_doctor,
{get_node,
'ns_1@10.242.238.88'}]}}}
[error_logger:info,2014-08-19T16:49:03.267,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17396.0>},
{name,ns_log},
{mfa,{ns_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.267,ns_1@10.242.238.90:ns_config_isasl_sync<0.17399.0>:ns_config_isasl_sync:init:63]isasl_sync init: ["/opt/couchbase/var/lib/couchbase/isasl.pw","_admin",
"f6126ae5fac44bf3d8316165791747f2"]
[ns_server:debug,2014-08-19T16:49:03.267,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
undefined
[ns_server:debug,2014-08-19T16:49:03.267,ns_1@10.242.238.90:ns_config_isasl_sync<0.17399.0>:ns_config_isasl_sync:init:71]isasl_sync init buckets: []
[error_logger:info,2014-08-19T16:49:03.267,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17397.0>},
{name,ns_crash_log_consumer},
{mfa,{ns_log,start_link_crash_consumer,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.267,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@10.242.238.90','ns_1@10.242.238.88']
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,xyzevwdfypcplvpp}]
[error_logger:info,2014-08-19T16:49:03.268,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17398.0>},
{name,ns_config_ets_dup},
{mfa,{ns_config_ets_dup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_isasl_sync<0.17399.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/isasl.pw"
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.268,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.269,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:03.269,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:03.269,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:03.269,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:03.269,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:03.270,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:03.270,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_rest_port} ->
18091
[error_logger:info,2014-08-19T16:49:03.270,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17399.0>},
{name,ns_config_isasl_sync},
{mfa,{ns_config_isasl_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.270,ns_1@10.242.238.90:ns_node_disco<0.17408.0>:ns_node_disco:init:103]Initting ns_node_disco with ['ns_1@10.242.238.88']
[error_logger:info,2014-08-19T16:49:03.270,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17405.0>},
{name,ns_log_events},
{mfa,{gen_event,start_link,[{local,ns_log_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.270,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[error_logger:info,2014-08-19T16:49:03.270,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17407.0>},
{name,ns_node_disco_events},
{mfargs,
{gen_event,start_link,
[{local,ns_node_disco_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.271,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:03.315,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:03.315,ns_1@10.242.238.90:<0.17409.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.88',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:03.315,ns_1@10.242.238.90:<0.17409.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.88','ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[error_logger:info,2014-08-19T16:49:03.316,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17408.0>},
{name,ns_node_disco},
{mfargs,{ns_node_disco,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.316,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:init:66]init pulling
[error_logger:info,2014-08-19T16:49:03.316,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17411.0>},
{name,ns_node_disco_log},
{mfargs,{ns_node_disco_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:03.316,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:do_pull:341]Pulling config from: 'ns_1@10.242.238.88'
[error_logger:info,2014-08-19T16:49:03.316,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17412.0>},
{name,ns_node_disco_conf_events},
{mfargs,{ns_node_disco_conf_events,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.316,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17413.0>},
{name,ns_config_rep_merger},
{mfargs,{ns_config_rep,start_link_merger,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.324,ns_1@10.242.238.90:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2014-08-19T16:49:03.324,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:init:68]init pushing
[ns_server:debug,2014-08-19T16:49:03.324,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T16:49:03.324,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T16:49:03.324,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T16:49:03.325,ns_1@10.242.238.90:ns_config_isasl_sync<0.17399.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/opt/couchbase/var/lib/couchbase/isasl.pw"
[ns_server:debug,2014-08-19T16:49:03.325,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:03.325,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[[{map,[{0,[],['ns_1@10.242.238.88',undefined]},
{1,[],['ns_1@10.242.238.88',undefined]},
{2,[],['ns_1@10.242.238.88',undefined]},
{3,[],['ns_1@10.242.238.88',undefined]},
{4,[],['ns_1@10.242.238.88',undefined]},
{5,[],['ns_1@10.242.238.88',undefined]},
{6,[],['ns_1@10.242.238.88',undefined]},
{7,[],['ns_1@10.242.238.88',undefined]},
{8,[],['ns_1@10.242.238.88',undefined]},
{9,[],['ns_1@10.242.238.88',undefined]},
{10,[],['ns_1@10.242.238.88',undefined]},
{11,[],['ns_1@10.242.238.88',undefined]},
{12,[],['ns_1@10.242.238.88',undefined]},
{13,[],['ns_1@10.242.238.88',undefined]},
{14,[],['ns_1@10.242.238.88',undefined]},
{15,[],['ns_1@10.242.238.88',undefined]},
{16,[],['ns_1@10.242.238.88',undefined]},
{17,[],['ns_1@10.242.238.88',undefined]},
{18,[],['ns_1@10.242.238.88',undefined]},
{19,[],['ns_1@10.242.238.88',undefined]},
{20,[],['ns_1@10.242.238.88',undefined]},
{21,[],['ns_1@10.242.238.88',undefined]},
{22,[],['ns_1@10.242.238.88',undefined]},
{23,[],['ns_1@10.242.238.88',undefined]},
{24,[],['ns_1@10.242.238.88',undefined]},
{25,[],['ns_1@10.242.238.88',undefined]},
{26,[],['ns_1@10.242.238.88',undefined]},
{27,[],['ns_1@10.242.238.88',undefined]},
{28,[],['ns_1@10.242.238.88',undefined]},
{29,[],['ns_1@10.242.238.88',undefined]},
{30,[],['ns_1@10.242.238.88',undefined]},
{31,[],['ns_1@10.242.238.88',undefined]},
{32,[],['ns_1@10.242.238.88',undefined]},
{33,[],['ns_1@10.242.238.88',undefined]},
{34,[],['ns_1@10.242.238.88',undefined]},
{35,[],['ns_1@10.242.238.88',undefined]},
{36,[],['ns_1@10.242.238.88',undefined]},
{37,[],['ns_1@10.242.238.88',undefined]},
{38,[],['ns_1@10.242.238.88',undefined]},
{39,[],['ns_1@10.242.238.88',undefined]},
{40,[],['ns_1@10.242.238.88',undefined]},
{41,[],['ns_1@10.242.238.88',undefined]},
{42,[],['ns_1@10.242.238.88',undefined]},
{43,[],['ns_1@10.242.238.88',undefined]},
{44,[],['ns_1@10.242.238.88',undefined]},
{45,[],['ns_1@10.242.238.88',undefined]},
{46,[],['ns_1@10.242.238.88',undefined]},
{47,[],['ns_1@10.242.238.88',undefined]},
{48,[],['ns_1@10.242.238.88',undefined]},
{49,[],['ns_1@10.242.238.88',undefined]},
{50,[],['ns_1@10.242.238.88',undefined]},
{51,[],['ns_1@10.242.238.88',undefined]},
{52,[],['ns_1@10.242.238.88',undefined]},
{53,[],['ns_1@10.242.238.88',undefined]},
{54,[],['ns_1@10.242.238.88',undefined]},
{55,[],['ns_1@10.242.238.88',undefined]},
{56,[],['ns_1@10.242.238.88',undefined]},
{57,[],['ns_1@10.242.238.88',undefined]},
{58,[],['ns_1@10.242.238.88',undefined]},
{59,[],['ns_1@10.242.238.88',undefined]},
{60,[],['ns_1@10.242.238.88',undefined]},
{61,[],['ns_1@10.242.238.88',undefined]},
{62,[],['ns_1@10.242.238.88',undefined]},
{63,[],['ns_1@10.242.238.88',undefined]},
{64,[],['ns_1@10.242.238.88',undefined]},
{65,[],['ns_1@10.242.238.88',undefined]},
{66,[],['ns_1@10.242.238.88',undefined]},
{67,[],['ns_1@10.242.238.88',undefined]},
{68,[],['ns_1@10.242.238.88',undefined]},
{69,[],['ns_1@10.242.238.88',undefined]},
{70,[],['ns_1@10.242.238.88',undefined]},
{71,[],['ns_1@10.242.238.88',undefined]},
{72,[],['ns_1@10.242.238.88',undefined]},
{73,[],['ns_1@10.242.238.88',undefined]},
{74,[],['ns_1@10.242.238.88',undefined]},
{75,[],['ns_1@10.242.238.88',undefined]},
{76,[],['ns_1@10.242.238.88',undefined]},
{77,[],['ns_1@10.242.238.88',undefined]},
{78,[],['ns_1@10.242.238.88',undefined]},
{79,[],['ns_1@10.242.238.88',undefined]},
{80,[],['ns_1@10.242.238.88',undefined]},
{81,[],['ns_1@10.242.238.88',undefined]},
{82,[],['ns_1@10.242.238.88',undefined]},
{83,[],['ns_1@10.242.238.88',undefined]},
{84,[],['ns_1@10.242.238.88',undefined]},
{85,[],['ns_1@10.242.238.88',undefined]},
{86,[],['ns_1@10.242.238.88',undefined]},
{87,[],['ns_1@10.242.238.88'|...]},
{88,[],[...]},
{89,[],...},
{90,...},
{...}|...]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88']},
{map_opts_hash,133465355}]]}]
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cert_and_pkey ->
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQOGMt4U8wCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5WgTuSJMU8qPdc8uDdst\nav13oFxDpbqz8mIk7TVReVHwO9MvKgi8cqlGev50BaQNfzFW41E/baDmpa8sAlSe\nzPoGcRD5wDJdHRH87FdW8eeE4rA8N9TcsSyJDo0gmWO+Vj+ow5dzF87001UstU6A\n5UQ5anT0dGnKLChpmk0KiKx28+XSnycDQ8osiLR"...>>,
<<"*****">>}
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[ns_server:debug,2014-08-19T16:49:03.328,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
memory_quota ->
90112
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@10.242.238.88','ns_1@10.242.238.89','ns_1@10.242.238.90']
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
read_only_user_creds ->
null
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:info,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[ns_server:debug,2014-08-19T16:49:03.329,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},
{name,<<"Group 1">>},
{nodes,['ns_1@10.242.238.88','ns_1@10.242.238.89','ns_1@10.242.238.90']}]]
[ns_server:debug,2014-08-19T16:49:03.330,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:init:72]init reannouncing
[ns_server:debug,2014-08-19T16:49:03.330,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T16:49:03.331,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
settings ->
[{stats,[{send_stats,false}]}]
[ns_server:debug,2014-08-19T16:49:03.331,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
uuid ->
<<"9032e293d656a8b04683554c561fe06f">>
[ns_server:debug,2014-08-19T16:49:03.331,ns_1@10.242.238.90:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[error_logger:info,2014-08-19T16:49:03.331,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.17414.0>},
{name,ns_config_rep},
{mfargs,{ns_config_rep,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.331,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17406.0>},
{name,ns_node_disco_sup},
{mfa,{ns_node_disco_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:03.331,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:do_push_keys:317]Replicating some config keys ([alert_limits,auto_failover_cfg,autocompaction,
buckets,cert_and_pkey,cluster_compat_version,
drop_request_memory_threshold_mib,
dynamic_config_version,email_alerts,
fast_warmup,index_aware_rebalance_disabled,
max_bucket_count,memory_quota,nodes_wanted,otp,
read_only_user_creds,remote_clusters,
replication,replication_topology,rest,
rest_creds,server_groups,
set_view_update_daemon,settings,uuid,
vbucket_map_history,
{couchdb,max_parallel_indexers},
{couchdb,max_parallel_replica_indexers},
{request_limit,capi},
{request_limit,rest},
{node,'ns_1@10.242.238.88',capi_port},
{node,'ns_1@10.242.238.88',compaction_daemon},
{node,'ns_1@10.242.238.88',config_version},
{node,'ns_1@10.242.238.88',isasl},
{node,'ns_1@10.242.238.88',membership},
{node,'ns_1@10.242.238.88',memcached},
{node,'ns_1@10.242.238.88',moxi},
{node,'ns_1@10.242.238.88',ns_log},
{node,'ns_1@10.242.238.88',port_servers},
{node,'ns_1@10.242.238.88',rest},
{node,'ns_1@10.242.238.88',ssl_capi_port},
{node,'ns_1@10.242.238.88',
ssl_proxy_downstream_port},
{node,'ns_1@10.242.238.88',
ssl_proxy_upstream_port},
{node,'ns_1@10.242.238.88',ssl_rest_port},
{node,'ns_1@10.242.238.89',capi_port},
{node,'ns_1@10.242.238.89',compaction_daemon},
{node,'ns_1@10.242.238.89',config_version},
{node,'ns_1@10.242.238.89',isasl},
{node,'ns_1@10.242.238.89',membership},
{node,'ns_1@10.242.238.89',memcached},
{node,'ns_1@10.242.238.89',moxi},
{node,'ns_1@10.242.238.89',ns_log},
{node,'ns_1@10.242.238.89',port_servers},
{node,'ns_1@10.242.238.89',rest},
{node,'ns_1@10.242.238.89',ssl_capi_port},
{node,'ns_1@10.242.238.89',
ssl_proxy_downstream_port},
{node,'ns_1@10.242.238.89',
ssl_proxy_upstream_port},
{node,'ns_1@10.242.238.89',ssl_rest_port},
{node,'ns_1@10.242.238.90',capi_port},
{node,'ns_1@10.242.238.90',compaction_daemon},
{node,'ns_1@10.242.238.90',config_version},
{node,'ns_1@10.242.238.90',isasl},
{node,'ns_1@10.242.238.90',membership},
{node,'ns_1@10.242.238.90',memcached}]..)
[ns_server:debug,2014-08-19T16:49:03.333,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
vbucket_map_history ->
[{[['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88'|...],
[...]|...],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]
[ns_server:debug,2014-08-19T16:49:03.333,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T16:49:03.333,ns_1@10.242.238.90:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[ns_server:debug,2014-08-19T16:49:03.333,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T16:49:03.333,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T16:49:03.334,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2014-08-19T16:49:03.334,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:03.334,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[error_logger:info,2014-08-19T16:49:03.334,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17423.0>},
{name,vbucket_map_mirror},
{mfa,{vbucket_map_mirror,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.334,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T16:49:03.334,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:03.334,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',membership} ->
active
[ns_server:debug,2014-08-19T16:49:03.335,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.335,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.335,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:03.335,ns_1@10.242.238.90:ns_log_events<0.17405.0>:ns_mail_log:init:44]ns_mail_log started up
[error_logger:info,2014-08-19T16:49:03.335,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17427.0>},
{name,bucket_info_cache},
{mfa,{bucket_info_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.335,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17430.0>},
{name,ns_tick_event},
{mfa,{gen_event,start_link,[{local,ns_tick_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.336,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17431.0>},
{name,buckets_events},
{mfa,{gen_event,start_link,[{local,buckets_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_heart_slow_status_updater<0.17440.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@10.242.238.90'},
{latest,"minute"}]}}}
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[error_logger:info,2014-08-19T16:49:03.336,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_mail_sup}
started: [{pid,<0.17433.0>},
{name,ns_mail_log},
{mfargs,{ns_mail_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',rest} ->
[{port,8091},{port_meta,global}]
[error_logger:info,2014-08-19T16:49:03.336,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17432.0>},
{name,ns_mail_sup},
{mfa,{ns_mail_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_proxy_downstream_port} ->
11214
[error_logger:info,2014-08-19T16:49:03.336,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17434.0>},
{name,ns_stats_event},
{mfa,{gen_event,start_link,[{local,ns_stats_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_heart_slow_status_updater<0.17440.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1},
{proc_lib,init_p_do_apply,3}]}
[error_logger:info,2014-08-19T16:49:03.336,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17437.0>},
{name,samples_loader_tasks},
{mfa,{samples_loader_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:03.336,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',capi_port} ->
8092
[error_logger:info,2014-08-19T16:49:03.336,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17438.0>},
{name,ns_heart},
{mfa,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.337,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:03.337,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',config_version} ->
{2,3,0}
[error_logger:info,2014-08-19T16:49:03.337,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17441.0>},
{name,ns_doctor},
{mfa,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.337,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:03.337,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',membership} ->
inactiveAdded
[ns_server:debug,2014-08-19T16:49:03.337,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.337,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.338,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:03.338,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:03.338,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:03.338,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:03.338,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:03.339,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:03.339,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:03.339,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',membership} ->
inactiveAdded
[ns_server:debug,2014-08-19T16:49:03.339,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90}]
[ns_server:debug,2014-08-19T16:49:03.339,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2014-08-19T16:49:03.339,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T16:49:03.339,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cert_and_pkey ->
{<<"-----BEGIN CERTIFICATE-----\nMIICmDCCAYKgAwIBAgIIE4vQOGMt4U8wCwYJKoZIhvcNAQEFMAwxCjAIBgNVBAMT\nASowHhcNMTMwMTAxMDAwMDAwWhcNNDkxMjMxMjM1OTU5WjAMMQowCAYDVQQDEwEq\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5WgTuSJMU8qPdc8uDdst\nav13oFxDpbqz8mIk7TVReVHwO9MvKgi8cqlGev50BaQNfzFW41E/baDmpa8sAlSe\nzPoGcRD5wDJdHRH87FdW8eeE4rA8N9TcsSyJDo0gmWO+Vj+ow5dzF87001UstU6A\n5UQ5anT0dGnKLChpmk0KiKx28+XSnycDQ8osiLR"...>>,
<<"*****">>}
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
cluster_compat_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
drop_request_memory_threshold_mib ->
undefined
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
dynamic_config_version ->
[2,5]
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,ip,
disk,overhead,ep_oom_errors,ep_item_commit_failed]}]
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
fast_warmup ->
[{fast_warmup_enabled,true},
{min_memory_threshold,10},
{min_items_threshold,10}]
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
max_bucket_count ->
10
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
memory_quota ->
90112
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@10.242.238.88','ns_1@10.242.238.89','ns_1@10.242.238.90']
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,xyzevwdfypcplvpp}]
[ns_server:debug,2014-08-19T16:49:03.340,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
read_only_user_creds ->
null
[ns_server:debug,2014-08-19T16:49:03.341,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
remote_clusters ->
[]
[ns_server:debug,2014-08-19T16:49:03.342,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2014-08-19T16:49:03.342,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
replication_topology ->
star
[ns_server:debug,2014-08-19T16:49:03.342,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
rest ->
[{port,8091}]
[ns_server:info,2014-08-19T16:49:03.342,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:handle_info:63]config change: rest_creds -> ********
[ns_server:debug,2014-08-19T16:49:03.342,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},
{name,<<"Group 1">>},
{nodes,['ns_1@10.242.238.88','ns_1@10.242.238.89','ns_1@10.242.238.90']}]]
[ns_server:debug,2014-08-19T16:49:03.342,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2014-08-19T16:49:03.343,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
settings ->
[{stats,[{send_stats,false}]}]
[ns_server:debug,2014-08-19T16:49:03.343,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
uuid ->
<<"9032e293d656a8b04683554c561fe06f">>
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
vbucket_map_history ->
[{[['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88'|...],
[...]|...],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T16:49:03.344,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:03.345,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',membership} ->
active
[ns_server:debug,2014-08-19T16:49:03.345,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.345,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.345,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:03.345,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.88',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:03.346,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',membership} ->
inactiveAdded
[ns_server:debug,2014-08-19T16:49:03.347,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.347,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.347,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:03.347,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:03.347,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',membership} ->
inactiveAdded
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.348,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:03.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:03.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:03.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:03.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:03.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:03.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:03.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',ssl_rest_port} ->
18091
[ns_server:info,2014-08-19T16:49:03.365,ns_1@10.242.238.90:remote_clusters_info<0.17445.0>:remote_clusters_info:read_or_create_table:540]Reading remote_clusters_info content from /opt/couchbase/var/lib/couchbase/remote_clusters_cache_v3
[ns_server:debug,2014-08-19T16:49:03.365,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:03.365,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:03.365,ns_1@10.242.238.90:<0.17420.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.88',
'ns_1@10.242.238.89',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:03.366,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:03.366,ns_1@10.242.238.90:ns_server_sup<0.17392.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: ['ns_1@10.242.238.88']
[error_logger:info,2014-08-19T16:49:03.366,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17445.0>},
{name,remote_clusters_info},
{mfa,{remote_clusters_info,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.366,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17449.0>},
{name,master_activity_events},
{mfa,
{gen_event,start_link,
[{local,master_activity_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.366,ns_1@10.242.238.90:ns_server_sup<0.17392.0>:mb_master:check_master_takeover_needed:143]Got replies: ['ns_1@10.242.238.88']
[ns_server:debug,2014-08-19T16:49:03.367,ns_1@10.242.238.90:ns_server_sup<0.17392.0>:mb_master:check_master_takeover_needed:156]Checking version of current master: 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:03.367,ns_1@10.242.238.90:ns_server_sup<0.17392.0>:mb_master:check_master_takeover_needed:174]Current master's supported compat version: [2,5,1]
[ns_server:debug,2014-08-19T16:49:03.367,ns_1@10.242.238.90:ns_server_sup<0.17392.0>:mb_master:check_master_takeover_needed:181]Current master is not older
[ns_server:debug,2014-08-19T16:49:03.367,ns_1@10.242.238.90:mb_master<0.17457.0>:mb_master:init:96]Starting as candidate. Peers: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90']
[ns_server:debug,2014-08-19T16:49:03.367,ns_1@10.242.238.90:ns_heart_slow_status_updater<0.17440.0>:ns_heart:current_status_slow:248]Ignoring failure to grab system stats:
{'EXIT',{noproc,{gen_server,call,
[{'stats_reader-@system','ns_1@10.242.238.90'},
{latest,"minute"}]}}}
[error_logger:info,2014-08-19T16:49:03.368,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17457.0>},
{name,mb_master},
{mfa,{mb_master,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:03.368,ns_1@10.242.238.90:ns_heart_slow_status_updater<0.17440.0>:ns_heart:grab_local_xdcr_replications:438]Ignoring exception getting xdcr replication infos
{exit,{noproc,{gen_server,call,[xdc_replication_sup,which_children,infinity]}},
[{gen_server,call,3},
{xdc_replication_sup,all_local_replication_infos,0},
{ns_heart,grab_local_xdcr_replications,0},
{ns_heart,current_status_slow,0},
{ns_heart,slow_updater_loop,1}]}
[error_logger:info,2014-08-19T16:49:03.368,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17460.0>},
{name,master_activity_events_ingress},
{mfa,
{gen_event,start_link,
[{local,master_activity_events_ingress}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[user:info,2014-08-19T16:49:03.368,ns_1@10.242.238.90:ns_node_disco<0.17408.0>:ns_node_disco:handle_info:159]Node 'ns_1@10.242.238.90' saw that node 'ns_1@10.242.238.89' came up. Tags: []
[error_logger:info,2014-08-19T16:49:03.368,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17461.0>},
{name,master_activity_events_timestamper},
{mfa,
{master_activity_events,start_link_timestamper,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.368,ns_1@10.242.238.90:ns_node_disco_events<0.17407.0>:ns_node_disco_rep_events:handle_event:42]Detected new nodes (['ns_1@10.242.238.89']). Moving config around.
[error_logger:info,2014-08-19T16:49:03.368,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17462.0>},
{name,master_activity_events_pids_watcher},
{mfa,
{master_activity_events_pids_watcher,start_link,
[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:03.369,ns_1@10.242.238.90:ns_node_disco_events<0.17407.0>:ns_node_disco_log:handle_event:46]ns_node_disco_log: nodes changed: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90']
[ns_server:info,2014-08-19T16:49:03.369,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_info:220]Replicating config to/from:
['ns_1@10.242.238.89']
[ns_server:debug,2014-08-19T16:49:03.369,ns_1@10.242.238.90:<0.17420.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[ns_server:info,2014-08-19T16:49:03.369,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:do_pull:341]Pulling config from: 'ns_1@10.242.238.89'
[ns_server:info,2014-08-19T16:49:03.374,ns_1@10.242.238.90:ns_doctor<0.17441.0>:ns_doctor:update_status:241]The following buckets became ready on node 'ns_1@10.242.238.88': ["default"]
[error_logger:info,2014-08-19T16:49:03.384,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17463.0>},
{name,master_activity_events_keeper},
{mfa,{master_activity_events_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.385,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_info:225]config pull_and_push done.
[ns_server:debug,2014-08-19T16:49:03.392,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:03.392,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:03.392,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:03.392,ns_1@10.242.238.90:<0.17425.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.88',
'ns_1@10.242.238.89',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:03.393,ns_1@10.242.238.90:<0.17425.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[error_logger:info,2014-08-19T16:49:03.424,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.17471.0>},
{name,ns_ssl_services_setup},
{mfargs,{ns_ssl_services_setup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.426,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.17475.0>},
{name,ns_rest_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_rest_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.428,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.17492.0>},
{name,ns_capi_ssl_service},
{mfargs,
{ns_ssl_services_setup,start_link_capi_service,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.428,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17470.0>},
{name,ns_ssl_services_sup},
{mfargs,{ns_ssl_services_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:03.428,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17509.0>},
{name,menelaus_ui_auth},
{mfargs,{menelaus_ui_auth,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.428,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17510.0>},
{name,menelaus_web_cache},
{mfargs,{menelaus_web_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.428,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17511.0>},
{name,menelaus_stats_gatherer},
{mfargs,{menelaus_stats_gatherer,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.429,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17512.0>},
{name,menelaus_web},
{mfargs,{menelaus_web,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.429,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17529.0>},
{name,menelaus_event},
{mfargs,{menelaus_event,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.429,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17530.0>},
{name,hot_keys_keeper},
{mfargs,{hot_keys_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.429,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.17531.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[user:info,2014-08-19T16:49:03.429,ns_1@10.242.238.90:ns_server_sup<0.17392.0>:menelaus_sup:start_link:44]Couchbase Server has started on web port 8091 on node 'ns_1@10.242.238.90'.
[error_logger:info,2014-08-19T16:49:03.429,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17469.0>},
{name,menelaus},
{mfa,{menelaus_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:03.430,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.17533.0>},
{name,mc_couch_events},
{mfargs,
{gen_event,start_link,[{local,mc_couch_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:03.430,ns_1@10.242.238.90:<0.17535.0>:mc_tcp_listener:init:24]mccouch is listening on port 11213
[error_logger:info,2014-08-19T16:49:03.430,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.17534.0>},
{name,mc_conn_sup},
{mfargs,{mc_conn_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:03.430,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,mc_sup}
started: [{pid,<0.17535.0>},
{name,mc_tcp_listener},
{mfargs,{mc_tcp_listener,start_link,[11213]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.430,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17532.0>},
{name,mc_sup},
{mfa,{mc_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:03.431,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17536.0>},
{name,ns_ports_setup},
{mfa,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.431,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17537.0>},
{name,ns_port_memcached_killer},
{mfa,{ns_ports_setup,start_memcached_force_killer,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:03.432,ns_1@10.242.238.90:<0.17539.0>:ns_memcached_log_rotator:init:28]Starting log rotator on "/opt/couchbase/var/lib/couchbase/logs"/"memcached.log"* with an initial period of 39003ms
[error_logger:info,2014-08-19T16:49:03.432,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17539.0>},
{name,ns_memcached_log_rotator},
{mfa,{ns_memcached_log_rotator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.433,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17541.0>},
{name,memcached_clients_pool},
{mfa,{memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.434,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17542.0>},
{name,proxied_memcached_clients_pool},
{mfa,{proxied_memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.435,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17543.0>},
{name,xdc_lhttpc_pool},
{mfa,
{lhttpc_manager,start_link,
[[{name,xdc_lhttpc_pool},
{connection_timeout,120000},
{pool_size,200}]]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.435,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17544.0>},
{name,ns_null_connection_pool},
{mfa,
{ns_null_connection_pool,start_link,
[ns_null_connection_pool]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.435,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17545.0>},
{name,xdc_replication_sup},
{mfa,{xdc_replication_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[user:info,2014-08-19T16:49:03.440,ns_1@10.242.238.90:<0.17397.0>:ns_log:crash_consumption_loop:64]Port server moxi on node 'babysitter_of_ns_1@127.0.0.1' exited with status 0. Restarting. Messages: WARNING: curl error: transfer closed with outstanding read data remaining from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
EOL on stdin. Exiting
[ns_server:debug,2014-08-19T16:49:03.441,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:03.441,ns_1@10.242.238.90:<0.17426.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.88',
'ns_1@10.242.238.89',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:03.442,ns_1@10.242.238.90:<0.17426.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[error_logger:info,2014-08-19T16:49:03.471,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17546.0>},
{name,xdc_rep_manager},
{mfa,{xdc_rep_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,30000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.471,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17555.0>},
{name,ns_memcached_sockets_pool},
{mfa,{ns_memcached_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.471,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.17558.0>},
{name,ns_bucket_worker},
{mfargs,{work_queue,start_link,[ns_bucket_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.471,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.17560.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.472,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.17559.0>},
{name,ns_bucket_sup},
{mfargs,{ns_bucket_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:03.472,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17557.0>},
{name,ns_bucket_worker_sup},
{mfa,{ns_bucket_worker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:03.472,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17561.0>},
{name,system_stats_collector},
{mfa,{system_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.472,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17564.0>},
{name,{stats_archiver,"@system"}},
{mfa,{stats_archiver,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.472,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17566.0>},
{name,{stats_reader,"@system"}},
{mfa,{stats_reader,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:03.472,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17567.0>},
{name,compaction_daemon},
{mfa,{compaction_daemon,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,86400000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.473,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:handle_info:444]No buckets to compact. Rescheduling compaction.
[ns_server:debug,2014-08-19T16:49:03.473,ns_1@10.242.238.90:xdc_rdoc_replication_srv<0.17569.0>:xdc_rdoc_replication_srv:init:76]Loaded the following docs:
[]
[ns_server:debug,2014-08-19T16:49:03.473,ns_1@10.242.238.90:xdc_rdoc_replication_srv<0.17569.0>:xdc_rdoc_replication_srv:handle_info:154]doing replicate_newnodes_docs
[error_logger:info,2014-08-19T16:49:03.473,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17569.0>},
{name,xdc_rdoc_replication_srv},
{mfa,{xdc_rdoc_replication_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:03.474,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
[ns_server:info,2014-08-19T16:49:03.474,ns_1@10.242.238.90:set_view_update_daemon<0.17571.0>:set_view_update_daemon:init:50]Set view update daemon, starting with the following settings:
update interval: 5000ms
minimum number of changes: 5000
[error_logger:info,2014-08-19T16:49:03.474,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.17571.0>},
{name,set_view_update_daemon},
{mfa,{set_view_update_daemon,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2014-08-19T16:49:03.474,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:perform_actual_join:954]Node ns_1@10.242.238.90 joined cluster
[error_logger:info,2014-08-19T16:49:03.474,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.17392.0>},
{name,ns_server_sup},
{mfargs,{ns_server_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[cluster:debug,2014-08-19T16:49:03.474,ns_1@10.242.238.90:ns_cluster<0.17151.0>:ns_cluster:handle_call:167]complete_join([{<<"targetNode">>,<<"ns_1@10.242.238.90">>},
{<<"availableStorage">>,
{struct,[{<<"hdd">>,
[{struct,[{<<"path">>,<<"/">>},
{<<"sizeKBytes">>,103212320},
{<<"usagePercent">>,3}]},
{struct,[{<<"path">>,<<"/dev/shm">>},
{<<"sizeKBytes">>,49515824},
{<<"usagePercent">>,0}]},
{struct,[{<<"path">>,<<"/boot">>},
{<<"sizeKBytes">>,198337},
{<<"usagePercent">>,17}]},
{struct,[{<<"path">>,<<"/data">>},
{<<"sizeKBytes">>,329573012},
{<<"usagePercent">>,1}]},
{struct,[{<<"path">>,<<"/test">>},
{<<"sizeKBytes">>,528447160},
{<<"usagePercent">>,1}]},
{struct,[{<<"path">>,<<"/var/lib/pgsql">>},
{<<"sizeKBytes">>,1922866992},
{<<"usagePercent">>,1}]}]}]}},
{<<"memoryQuota">>,90112},
{<<"storageTotals">>,
{struct,[{<<"ram">>,
{struct,[{<<"total">>,101408407552},
{<<"quotaTotal">>,94489280512},
{<<"quotaUsed">>,13369344000},
{<<"used">>,13174808576},
{<<"usedByData">>,31847576}]}},
{<<"hdd">>,
{struct,[{<<"total">>,1969015799808},
{<<"quotaTotal">>,1969015799808},
{<<"used">>,19690157998},
{<<"usedByData">>,2736915},
{<<"free">>,1949325641810}]}}]}},
{<<"storage">>,
{struct,[{<<"ssd">>,[]},
{<<"hdd">>,
[{struct,[{<<"path">>,<<"/var/lib/pgsql">>},
{<<"index_path">>,<<"/var/lib/pgsql">>},
{<<"quotaMb">>,<<"none">>},
{<<"state">>,<<"ok">>}]}]}]}},
{<<"systemStats">>,
{struct,[{<<"cpu_utilization_rate">>,0.6265664160401002},
{<<"swap_total">>,0},
{<<"swap_used">>,0},
{<<"mem_total">>,101408407552},
{<<"mem_free">>,89866596352}]}},
{<<"interestingStats">>,
{struct,[{<<"cmd_get">>,0.0},
{<<"couch_docs_actual_disk_size">>,2736915},
{<<"couch_docs_data_size">>,2729956},
{<<"couch_views_actual_disk_size">>,0},
{<<"couch_views_data_size">>,0},
{<<"curr_items">>,0},
{<<"curr_items_tot">>,0},
{<<"ep_bg_fetched">>,0.0},
{<<"get_hits">>,0.0},
{<<"mem_used">>,31847576},
{<<"ops">>,0.0},
{<<"vb_replica_curr_items">>,0}]}},
{<<"uptime">>,<<"4088">>},
{<<"memoryTotal">>,101408407552},
{<<"memoryFree">>,89866596352},
{<<"mcdMemoryReserved">>,77368},
{<<"mcdMemoryAllocated">>,77368},
{<<"couchApiBase">>,<<"http://10.242.238.88:8092/">>},
{<<"otpCookie">>,<<"xyzevwdfypcplvpp">>},
{<<"clusterMembership">>,<<"active">>},
{<<"status">>,<<"healthy">>},
{<<"otpNode">>,<<"ns_1@10.242.238.88">>},
{<<"thisNode">>,true},
{<<"hostname">>,<<"10.242.238.88:8091">>},
{<<"clusterCompatibility">>,131077},
{<<"version">>,<<"2.5.1-1083-rel-enterprise">>},
{<<"os">>,<<"x86_64-unknown-linux-gnu">>},
{<<"ports">>,
{struct,[{<<"httpsMgmt">>,18091},
{<<"httpsCAPI">>,18092},
{<<"sslProxy">>,11214},
{<<"proxy">>,11211},
{<<"direct">>,11210}]}}]) -> {ok,ok}
[ns_server:info,2014-08-19T16:49:05.076,ns_1@10.242.238.90:mb_master<0.17457.0>:mb_master:candidate:362]Changing master from undefined to 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:25.982,ns_1@10.242.238.90:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2014-08-19T16:49:25.982,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:25.982,ns_1@10.242.238.90:mb_master<0.17457.0>:mb_master:update_peers:506]List of peers has changed from ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90'] to ['ns_1@10.242.238.88',
'ns_1@10.242.238.89',
'ns_1@10.242.238.90',
'ns_1@10.242.238.91']
[ns_server:debug,2014-08-19T16:49:25.982,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
nodes_wanted ->
['ns_1@10.242.238.88','ns_1@10.242.238.89','ns_1@10.242.238.90',
'ns_1@10.242.238.91']
[ns_server:debug,2014-08-19T16:49:25.983,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:25.983,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
server_groups ->
[[{uuid,<<"0">>},
{name,<<"Group 1">>},
{nodes,['ns_1@10.242.238.88','ns_1@10.242.238.89','ns_1@10.242.238.90',
'ns_1@10.242.238.91']}]]
[ns_server:debug,2014-08-19T16:49:25.983,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',membership} ->
inactiveAdded
[ns_server:debug,2014-08-19T16:49:26.026,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:26.027,ns_1@10.242.238.90:<0.17682.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.88',
'ns_1@10.242.238.89',
'ns_1@10.242.238.90',
'ns_1@10.242.238.91'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:26.030,ns_1@10.242.238.90:<0.17682.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:26.491,ns_1@10.242.238.90:<0.17570.0>:xdc_rdoc_replication_srv:nodeup_monitoring_loop:46]got nodeup event. Considering rdocs replication
[ns_server:debug,2014-08-19T16:49:26.491,ns_1@10.242.238.90:xdc_rdoc_replication_srv<0.17569.0>:xdc_rdoc_replication_srv:handle_info:154]doing replicate_newnodes_docs
[user:info,2014-08-19T16:49:26.491,ns_1@10.242.238.90:ns_node_disco<0.17408.0>:ns_node_disco:handle_info:159]Node 'ns_1@10.242.238.90' saw that node 'ns_1@10.242.238.91' came up. Tags: []
[ns_server:debug,2014-08-19T16:49:26.491,ns_1@10.242.238.90:ns_node_disco_events<0.17407.0>:ns_node_disco_rep_events:handle_event:42]Detected new nodes (['ns_1@10.242.238.91']). Moving config around.
[ns_server:info,2014-08-19T16:49:26.491,ns_1@10.242.238.90:ns_node_disco_events<0.17407.0>:ns_node_disco_log:handle_event:46]ns_node_disco_log: nodes changed: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']
[ns_server:warn,2014-08-19T16:49:26.492,ns_1@10.242.238.90:xdc_rdoc_replication_srv<0.17569.0>:xdc_rdoc_replication_srv:handle_info:150]Remote server node {xdc_rdoc_replication_srv,'ns_1@10.242.238.91'} process down: noproc
[ns_server:debug,2014-08-19T16:49:26.517,ns_1@10.242.238.90:ns_config_events<0.17153.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[ns_server:debug,2014-08-19T16:49:26.517,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
otp ->
[{cookie,xyzevwdfypcplvpp}]
[ns_server:debug,2014-08-19T16:49:26.517,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2014-08-19T16:49:26.517,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',capi_port} ->
8092
[ns_server:debug,2014-08-19T16:49:26.517,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',compaction_daemon} ->
[{check_interval,30},{min_file_size,131072}]
[ns_server:debug,2014-08-19T16:49:26.518,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2014-08-19T16:49:26.518,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',config_version} ->
{2,3,0}
[ns_server:debug,2014-08-19T16:49:26.518,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',isasl} ->
[{path,"/opt/couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2014-08-19T16:49:26.518,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',memcached} ->
[{mccouch_port,11213},
{engines,
[{membase,
[{engine,"/opt/couchbase/lib/memcached/ep.so"},
{static_config_string,
"vb0=false;waitforwarmup=false;failpartialwarmup=false"}]},
{memcached,
[{engine,"/opt/couchbase/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{log_path,"/opt/couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003},
{dedicated_port,11209},
{bucket_engine,"/opt/couchbase/lib/memcached/bucket_engine.so"},
{port,11210},
{dedicated_port,11209},
{admin_user,"_admin"},
{admin_pass,"*****"},
{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:26.518,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',moxi} ->
[{port,11211},{verbosity,[]}]
[ns_server:debug,2014-08-19T16:49:26.518,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',ns_log} ->
[{filename,"/opt/couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2014-08-19T16:49:26.519,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',port_servers} ->
[{moxi,"/opt/couchbase/bin/moxi",
["-Z",
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
[port]},
"-z",
{"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming",
[{misc,this_node_rest_port,[]}]},
"-p","0","-Y","y","-O","stderr",
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MOXI_SASL_PLAIN_USR",{"~s",[{ns_moxi_sup,rest_user,[]}]}},
{"MOXI_SASL_PLAIN_PWD",{"~s",[{ns_moxi_sup,rest_pass,[]}]}}]},
use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]},
{memcached,"/opt/couchbase/bin/memcached",
["-X","/opt/couchbase/lib/memcached/stdin_term_handler.so","-X",
{"/opt/couchbase/lib/memcached/file_logger.so,cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]},
"-l",
{"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]},
"-p",
{"~B",[port]},
"-E","/opt/couchbase/lib/memcached/bucket_engine.so","-B",
"binary","-r","-c","10000","-e",
{"admin=~s;default_bucket_name=default;auto_create=false",
[admin_user]},
{"~s",[verbosity]}],
[{env,[{"EVENT_NOSELECT","1"},
{"MEMCACHED_TOP_KEYS","100"},
{"ISASL_PWFILE",{"~s",[{isasl,path}]}}]},
use_stdio,stderr_to_stdout,exit_status,port_server_send_eol,
stream]}]
[ns_server:debug,2014-08-19T16:49:26.519,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',rest} ->
[{port,8091},{port_meta,global}]
[ns_server:debug,2014-08-19T16:49:26.519,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',ssl_capi_port} ->
18092
[ns_server:debug,2014-08-19T16:49:26.519,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',ssl_proxy_downstream_port} ->
11214
[ns_server:debug,2014-08-19T16:49:26.519,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',ssl_proxy_upstream_port} ->
11215
[ns_server:debug,2014-08-19T16:49:26.519,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',ssl_rest_port} ->
18091
[ns_server:debug,2014-08-19T16:49:26.555,ns_1@10.242.238.90:ns_cookie_manager<0.17150.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/opt/couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2014-08-19T16:49:26.555,ns_1@10.242.238.90:<0.17689.0>:ns_node_disco:do_nodes_wanted_updated_fun:199]ns_node_disco: nodes_wanted updated: ['ns_1@10.242.238.88',
'ns_1@10.242.238.89',
'ns_1@10.242.238.90',
'ns_1@10.242.238.91'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:26.556,ns_1@10.242.238.90:<0.17689.0>:ns_node_disco:do_nodes_wanted_updated_fun:205]ns_node_disco: nodes_wanted pong: ['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91'], with cookie: xyzevwdfypcplvpp
[ns_server:debug,2014-08-19T16:49:26.557,ns_1@10.242.238.90:xdc_rdoc_replication_srv<0.17569.0>:xdc_rdoc_replication_srv:handle_info:154]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:30.839,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:30.840,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.89',membership} ->
active
[ns_server:debug,2014-08-19T16:49:30.840,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.90',membership} ->
active
[ns_server:debug,2014-08-19T16:49:30.841,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
{node,'ns_1@10.242.238.91',membership} ->
active
[ns_server:debug,2014-08-19T16:49:30.841,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2308 us
[ns_server:debug,2014-08-19T16:49:30.842,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
counters ->
[{rebalance_start,1}]
[ns_server:debug,2014-08-19T16:49:30.842,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
rebalance_status ->
running
[ns_server:debug,2014-08-19T16:49:30.842,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
rebalancer_pid ->
<16550.25442.0>
[user:info,2014-08-19T16:49:30.863,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_unused_buckets_db_files:492]Deleting old data files of bucket "tiles"
[user:info,2014-08-19T16:49:30.863,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_unused_buckets_db_files:492]Deleting old data files of bucket "default"
[ns_server:info,2014-08-19T16:49:30.867,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/master">>: ok
[ns_server:info,2014-08-19T16:49:30.870,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/999">>: ok
[ns_server:info,2014-08-19T16:49:30.873,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/998">>: ok
[ns_server:info,2014-08-19T16:49:30.876,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/997">>: ok
[ns_server:info,2014-08-19T16:49:30.881,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/996">>: ok
[ns_server:info,2014-08-19T16:49:30.884,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/995">>: ok
[ns_server:info,2014-08-19T16:49:30.888,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/994">>: ok
[ns_server:info,2014-08-19T16:49:30.891,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/993">>: ok
[ns_server:info,2014-08-19T16:49:30.893,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/992">>: ok
[ns_server:info,2014-08-19T16:49:30.896,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/991">>: ok
[ns_server:info,2014-08-19T16:49:30.899,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/990">>: ok
[ns_server:info,2014-08-19T16:49:30.901,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/99">>: ok
[ns_server:info,2014-08-19T16:49:30.904,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/989">>: ok
[ns_server:info,2014-08-19T16:49:30.907,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/988">>: ok
[ns_server:info,2014-08-19T16:49:30.911,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/987">>: ok
[ns_server:info,2014-08-19T16:49:30.914,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/986">>: ok
[ns_server:info,2014-08-19T16:49:30.917,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/985">>: ok
[ns_server:info,2014-08-19T16:49:30.920,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/984">>: ok
[ns_server:info,2014-08-19T16:49:30.924,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/983">>: ok
[ns_server:info,2014-08-19T16:49:30.926,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/982">>: ok
[ns_server:info,2014-08-19T16:49:30.928,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/981">>: ok
[ns_server:info,2014-08-19T16:49:30.931,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/980">>: ok
[ns_server:info,2014-08-19T16:49:30.934,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/98">>: ok
[ns_server:info,2014-08-19T16:49:30.936,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/979">>: ok
[ns_server:info,2014-08-19T16:49:30.938,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/978">>: ok
[ns_server:info,2014-08-19T16:49:30.941,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/977">>: ok
[ns_server:info,2014-08-19T16:49:30.943,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/976">>: ok
[ns_server:info,2014-08-19T16:49:30.945,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/975">>: ok
[ns_server:info,2014-08-19T16:49:30.947,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/974">>: ok
[ns_server:info,2014-08-19T16:49:30.949,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/973">>: ok
[ns_server:info,2014-08-19T16:49:30.951,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/972">>: ok
[ns_server:info,2014-08-19T16:49:30.953,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/971">>: ok
[ns_server:info,2014-08-19T16:49:30.955,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/970">>: ok
[ns_server:info,2014-08-19T16:49:30.957,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/97">>: ok
[ns_server:info,2014-08-19T16:49:30.959,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/969">>: ok
[ns_server:info,2014-08-19T16:49:30.960,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/968">>: ok
[ns_server:info,2014-08-19T16:49:30.963,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/967">>: ok
[ns_server:info,2014-08-19T16:49:30.965,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/966">>: ok
[ns_server:info,2014-08-19T16:49:30.967,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/965">>: ok
[ns_server:info,2014-08-19T16:49:30.969,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/964">>: ok
[ns_server:info,2014-08-19T16:49:30.971,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/963">>: ok
[ns_server:info,2014-08-19T16:49:30.973,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/962">>: ok
[ns_server:info,2014-08-19T16:49:30.975,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/961">>: ok
[ns_server:info,2014-08-19T16:49:30.976,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/960">>: ok
[ns_server:info,2014-08-19T16:49:30.980,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/96">>: ok
[ns_server:info,2014-08-19T16:49:30.983,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/959">>: ok
[ns_server:info,2014-08-19T16:49:30.986,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/958">>: ok
[ns_server:info,2014-08-19T16:49:30.989,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/957">>: ok
[ns_server:info,2014-08-19T16:49:30.992,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/956">>: ok
[ns_server:info,2014-08-19T16:49:30.995,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/955">>: ok
[ns_server:info,2014-08-19T16:49:30.997,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/954">>: ok
[ns_server:info,2014-08-19T16:49:30.999,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/953">>: ok
[ns_server:info,2014-08-19T16:49:31.001,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/952">>: ok
[ns_server:info,2014-08-19T16:49:31.004,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/951">>: ok
[ns_server:info,2014-08-19T16:49:31.007,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/950">>: ok
[ns_server:info,2014-08-19T16:49:31.009,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/95">>: ok
[ns_server:info,2014-08-19T16:49:31.011,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/949">>: ok
[ns_server:info,2014-08-19T16:49:31.014,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/948">>: ok
[ns_server:info,2014-08-19T16:49:31.016,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/947">>: ok
[ns_server:info,2014-08-19T16:49:31.018,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/946">>: ok
[ns_server:info,2014-08-19T16:49:31.022,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/945">>: ok
[ns_server:info,2014-08-19T16:49:31.024,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/944">>: ok
[ns_server:info,2014-08-19T16:49:31.026,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/943">>: ok
[ns_server:info,2014-08-19T16:49:31.029,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/942">>: ok
[ns_server:info,2014-08-19T16:49:31.032,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/941">>: ok
[ns_server:info,2014-08-19T16:49:31.034,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/940">>: ok
[ns_server:info,2014-08-19T16:49:31.038,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/94">>: ok
[ns_server:info,2014-08-19T16:49:31.040,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/939">>: ok
[ns_server:info,2014-08-19T16:49:31.042,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/938">>: ok
[ns_server:info,2014-08-19T16:49:31.044,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/93">>: ok
[ns_server:info,2014-08-19T16:49:31.047,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/92">>: ok
[ns_server:info,2014-08-19T16:49:31.049,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/91">>: ok
[ns_server:info,2014-08-19T16:49:31.051,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/90">>: ok
[ns_server:info,2014-08-19T16:49:31.053,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/89">>: ok
[ns_server:info,2014-08-19T16:49:31.055,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/88">>: ok
[ns_server:info,2014-08-19T16:49:31.058,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/87">>: ok
[ns_server:info,2014-08-19T16:49:31.060,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/86">>: ok
[ns_server:info,2014-08-19T16:49:31.063,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/767">>: ok
[ns_server:info,2014-08-19T16:49:31.065,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/766">>: ok
[ns_server:info,2014-08-19T16:49:31.068,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/765">>: ok
[ns_server:info,2014-08-19T16:49:31.069,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/764">>: ok
[ns_server:info,2014-08-19T16:49:31.072,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/763">>: ok
[ns_server:info,2014-08-19T16:49:31.074,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/762">>: ok
[ns_server:info,2014-08-19T16:49:31.077,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/761">>: ok
[ns_server:info,2014-08-19T16:49:31.079,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/760">>: ok
[ns_server:info,2014-08-19T16:49:31.081,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/759">>: ok
[ns_server:info,2014-08-19T16:49:31.083,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/758">>: ok
[ns_server:info,2014-08-19T16:49:31.085,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/757">>: ok
[ns_server:info,2014-08-19T16:49:31.087,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/756">>: ok
[ns_server:info,2014-08-19T16:49:31.089,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/755">>: ok
[ns_server:info,2014-08-19T16:49:31.091,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/754">>: ok
[ns_server:info,2014-08-19T16:49:31.093,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/753">>: ok
[ns_server:info,2014-08-19T16:49:31.095,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/752">>: ok
[ns_server:info,2014-08-19T16:49:31.098,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/751">>: ok
[ns_server:info,2014-08-19T16:49:31.101,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/750">>: ok
[ns_server:info,2014-08-19T16:49:31.104,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/749">>: ok
[ns_server:info,2014-08-19T16:49:31.106,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/748">>: ok
[ns_server:info,2014-08-19T16:49:31.107,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/747">>: ok
[ns_server:info,2014-08-19T16:49:31.109,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/746">>: ok
[ns_server:info,2014-08-19T16:49:31.112,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/745">>: ok
[ns_server:info,2014-08-19T16:49:31.114,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/744">>: ok
[ns_server:info,2014-08-19T16:49:31.116,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/743">>: ok
[ns_server:info,2014-08-19T16:49:31.118,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/742">>: ok
[ns_server:info,2014-08-19T16:49:31.120,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/741">>: ok
[ns_server:info,2014-08-19T16:49:31.122,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/740">>: ok
[ns_server:info,2014-08-19T16:49:31.124,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/739">>: ok
[ns_server:info,2014-08-19T16:49:31.126,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/738">>: ok
[ns_server:info,2014-08-19T16:49:31.128,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/737">>: ok
[ns_server:info,2014-08-19T16:49:31.130,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/736">>: ok
[ns_server:info,2014-08-19T16:49:31.132,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/735">>: ok
[ns_server:info,2014-08-19T16:49:31.133,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/734">>: ok
[ns_server:info,2014-08-19T16:49:31.135,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/733">>: ok
[ns_server:info,2014-08-19T16:49:31.137,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/732">>: ok
[ns_server:info,2014-08-19T16:49:31.139,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/731">>: ok
[ns_server:info,2014-08-19T16:49:31.142,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/730">>: ok
[ns_server:info,2014-08-19T16:49:31.146,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/729">>: ok
[ns_server:info,2014-08-19T16:49:31.148,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/728">>: ok
[ns_server:info,2014-08-19T16:49:31.150,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/727">>: ok
[ns_server:info,2014-08-19T16:49:31.152,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/726">>: ok
[ns_server:info,2014-08-19T16:49:31.155,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/725">>: ok
[ns_server:info,2014-08-19T16:49:31.157,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/724">>: ok
[ns_server:info,2014-08-19T16:49:31.159,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/723">>: ok
[ns_server:info,2014-08-19T16:49:31.161,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/722">>: ok
[ns_server:info,2014-08-19T16:49:31.164,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/721">>: ok
[ns_server:info,2014-08-19T16:49:31.166,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/720">>: ok
[ns_server:info,2014-08-19T16:49:31.168,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/719">>: ok
[ns_server:info,2014-08-19T16:49:31.170,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/718">>: ok
[ns_server:info,2014-08-19T16:49:31.172,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/717">>: ok
[ns_server:info,2014-08-19T16:49:31.174,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/716">>: ok
[ns_server:info,2014-08-19T16:49:31.177,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/715">>: ok
[ns_server:info,2014-08-19T16:49:31.180,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/714">>: ok
[ns_server:info,2014-08-19T16:49:31.182,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/713">>: ok
[ns_server:info,2014-08-19T16:49:31.185,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/712">>: ok
[ns_server:info,2014-08-19T16:49:31.187,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/711">>: ok
[ns_server:info,2014-08-19T16:49:31.189,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/710">>: ok
[ns_server:info,2014-08-19T16:49:31.192,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/709">>: ok
[ns_server:info,2014-08-19T16:49:31.194,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/708">>: ok
[ns_server:info,2014-08-19T16:49:31.196,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/707">>: ok
[ns_server:info,2014-08-19T16:49:31.199,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/706">>: ok
[ns_server:info,2014-08-19T16:49:31.201,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/705">>: ok
[ns_server:info,2014-08-19T16:49:31.203,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/704">>: ok
[ns_server:info,2014-08-19T16:49:31.206,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/703">>: ok
[ns_server:info,2014-08-19T16:49:31.208,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/702">>: ok
[ns_server:info,2014-08-19T16:49:31.210,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/701">>: ok
[ns_server:info,2014-08-19T16:49:31.213,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/700">>: ok
[ns_server:info,2014-08-19T16:49:31.215,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/699">>: ok
[ns_server:info,2014-08-19T16:49:31.218,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/698">>: ok
[ns_server:info,2014-08-19T16:49:31.220,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/697">>: ok
[ns_server:info,2014-08-19T16:49:31.222,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/696">>: ok
[ns_server:info,2014-08-19T16:49:31.224,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/695">>: ok
[ns_server:info,2014-08-19T16:49:31.226,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/694">>: ok
[ns_server:info,2014-08-19T16:49:31.229,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/693">>: ok
[ns_server:info,2014-08-19T16:49:31.231,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/692">>: ok
[ns_server:info,2014-08-19T16:49:31.233,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/691">>: ok
[ns_server:info,2014-08-19T16:49:31.235,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/690">>: ok
[ns_server:info,2014-08-19T16:49:31.238,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/689">>: ok
[ns_server:info,2014-08-19T16:49:31.240,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/688">>: ok
[ns_server:info,2014-08-19T16:49:31.242,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/687">>: ok
[ns_server:info,2014-08-19T16:49:31.244,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/686">>: ok
[ns_server:info,2014-08-19T16:49:31.246,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/685">>: ok
[ns_server:info,2014-08-19T16:49:31.248,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/684">>: ok
[ns_server:info,2014-08-19T16:49:31.250,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/683">>: ok
[ns_server:info,2014-08-19T16:49:31.252,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/682">>: ok
[ns_server:info,2014-08-19T16:49:31.254,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/681">>: ok
[ns_server:info,2014-08-19T16:49:31.256,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/680">>: ok
[ns_server:info,2014-08-19T16:49:31.258,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/679">>: ok
[ns_server:info,2014-08-19T16:49:31.260,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/678">>: ok
[ns_server:info,2014-08-19T16:49:31.262,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/677">>: ok
[ns_server:info,2014-08-19T16:49:31.264,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/676">>: ok
[ns_server:info,2014-08-19T16:49:31.266,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/675">>: ok
[ns_server:info,2014-08-19T16:49:31.269,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/674">>: ok
[ns_server:info,2014-08-19T16:49:31.270,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/673">>: ok
[ns_server:info,2014-08-19T16:49:31.272,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/672">>: ok
[ns_server:info,2014-08-19T16:49:31.274,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/671">>: ok
[ns_server:info,2014-08-19T16:49:31.275,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/670">>: ok
[ns_server:info,2014-08-19T16:49:31.277,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/669">>: ok
[ns_server:info,2014-08-19T16:49:31.279,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/668">>: ok
[ns_server:info,2014-08-19T16:49:31.281,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/667">>: ok
[ns_server:info,2014-08-19T16:49:31.283,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/666">>: ok
[ns_server:info,2014-08-19T16:49:31.285,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/665">>: ok
[ns_server:info,2014-08-19T16:49:31.287,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/664">>: ok
[ns_server:info,2014-08-19T16:49:31.289,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/663">>: ok
[ns_server:info,2014-08-19T16:49:31.291,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/662">>: ok
[ns_server:info,2014-08-19T16:49:31.293,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/661">>: ok
[ns_server:info,2014-08-19T16:49:31.295,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/660">>: ok
[ns_server:info,2014-08-19T16:49:31.297,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/659">>: ok
[ns_server:info,2014-08-19T16:49:31.299,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/658">>: ok
[ns_server:info,2014-08-19T16:49:31.302,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/657">>: ok
[ns_server:info,2014-08-19T16:49:31.303,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/656">>: ok
[ns_server:info,2014-08-19T16:49:31.305,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/655">>: ok
[ns_server:info,2014-08-19T16:49:31.306,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/654">>: ok
[ns_server:info,2014-08-19T16:49:31.308,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/653">>: ok
[ns_server:info,2014-08-19T16:49:31.310,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/652">>: ok
[ns_server:info,2014-08-19T16:49:31.312,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/651">>: ok
[ns_server:info,2014-08-19T16:49:31.314,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/650">>: ok
[ns_server:info,2014-08-19T16:49:31.316,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/649">>: ok
[ns_server:info,2014-08-19T16:49:31.317,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/648">>: ok
[ns_server:info,2014-08-19T16:49:31.319,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/647">>: ok
[ns_server:info,2014-08-19T16:49:31.320,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/646">>: ok
[ns_server:info,2014-08-19T16:49:31.323,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/645">>: ok
[ns_server:info,2014-08-19T16:49:31.324,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/644">>: ok
[ns_server:info,2014-08-19T16:49:31.326,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/643">>: ok
[ns_server:info,2014-08-19T16:49:31.329,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/642">>: ok
[ns_server:info,2014-08-19T16:49:31.331,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/641">>: ok
[ns_server:info,2014-08-19T16:49:31.333,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/640">>: ok
[ns_server:info,2014-08-19T16:49:31.335,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/639">>: ok
[ns_server:info,2014-08-19T16:49:31.337,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/638">>: ok
[ns_server:info,2014-08-19T16:49:31.339,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/637">>: ok
[ns_server:info,2014-08-19T16:49:31.341,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/636">>: ok
[ns_server:info,2014-08-19T16:49:31.343,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/635">>: ok
[ns_server:info,2014-08-19T16:49:31.345,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/634">>: ok
[ns_server:info,2014-08-19T16:49:31.347,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/633">>: ok
[ns_server:info,2014-08-19T16:49:31.348,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/632">>: ok
[ns_server:info,2014-08-19T16:49:31.350,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/631">>: ok
[ns_server:info,2014-08-19T16:49:31.352,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/630">>: ok
[ns_server:info,2014-08-19T16:49:31.353,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/629">>: ok
[ns_server:info,2014-08-19T16:49:31.355,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/628">>: ok
[ns_server:info,2014-08-19T16:49:31.357,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/627">>: ok
[ns_server:info,2014-08-19T16:49:31.358,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/626">>: ok
[ns_server:info,2014-08-19T16:49:31.360,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/625">>: ok
[ns_server:info,2014-08-19T16:49:31.361,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/624">>: ok
[ns_server:info,2014-08-19T16:49:31.363,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/623">>: ok
[ns_server:info,2014-08-19T16:49:31.364,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/622">>: ok
[ns_server:info,2014-08-19T16:49:31.366,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/621">>: ok
[ns_server:info,2014-08-19T16:49:31.368,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/620">>: ok
[ns_server:info,2014-08-19T16:49:31.369,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/619">>: ok
[ns_server:info,2014-08-19T16:49:31.371,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/618">>: ok
[ns_server:info,2014-08-19T16:49:31.372,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/617">>: ok
[ns_server:info,2014-08-19T16:49:31.374,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/616">>: ok
[ns_server:info,2014-08-19T16:49:31.375,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/615">>: ok
[ns_server:info,2014-08-19T16:49:31.377,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/614">>: ok
[ns_server:info,2014-08-19T16:49:31.378,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/613">>: ok
[ns_server:info,2014-08-19T16:49:31.379,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/612">>: ok
[ns_server:info,2014-08-19T16:49:31.381,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/611">>: ok
[ns_server:info,2014-08-19T16:49:31.383,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/610">>: ok
[ns_server:info,2014-08-19T16:49:31.385,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/609">>: ok
[ns_server:info,2014-08-19T16:49:31.387,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/608">>: ok
[ns_server:info,2014-08-19T16:49:31.389,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/607">>: ok
[ns_server:info,2014-08-19T16:49:31.390,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/606">>: ok
[ns_server:info,2014-08-19T16:49:31.392,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/605">>: ok
[ns_server:info,2014-08-19T16:49:31.394,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/604">>: ok
[ns_server:info,2014-08-19T16:49:31.397,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/603">>: ok
[ns_server:info,2014-08-19T16:49:31.399,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/602">>: ok
[ns_server:info,2014-08-19T16:49:31.400,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/601">>: ok
[ns_server:info,2014-08-19T16:49:31.402,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/600">>: ok
[ns_server:info,2014-08-19T16:49:31.404,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/599">>: ok
[ns_server:info,2014-08-19T16:49:31.406,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/598">>: ok
[ns_server:info,2014-08-19T16:49:31.408,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/597">>: ok
[ns_server:info,2014-08-19T16:49:31.410,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/596">>: ok
[ns_server:info,2014-08-19T16:49:31.412,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/595">>: ok
[ns_server:info,2014-08-19T16:49:31.414,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/594">>: ok
[ns_server:info,2014-08-19T16:49:31.416,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/593">>: ok
[ns_server:info,2014-08-19T16:49:31.419,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/592">>: ok
[ns_server:info,2014-08-19T16:49:31.420,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/591">>: ok
[ns_server:info,2014-08-19T16:49:31.422,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/590">>: ok
[ns_server:info,2014-08-19T16:49:31.424,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/589">>: ok
[ns_server:info,2014-08-19T16:49:31.426,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/588">>: ok
[ns_server:info,2014-08-19T16:49:31.428,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/587">>: ok
[ns_server:info,2014-08-19T16:49:31.430,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/586">>: ok
[ns_server:info,2014-08-19T16:49:31.432,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/585">>: ok
[ns_server:info,2014-08-19T16:49:31.434,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/584">>: ok
[ns_server:info,2014-08-19T16:49:31.436,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/583">>: ok
[ns_server:info,2014-08-19T16:49:31.437,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/582">>: ok
[ns_server:info,2014-08-19T16:49:31.439,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/581">>: ok
[ns_server:info,2014-08-19T16:49:31.441,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/580">>: ok
[ns_server:info,2014-08-19T16:49:31.443,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/579">>: ok
[ns_server:info,2014-08-19T16:49:31.444,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/578">>: ok
[ns_server:info,2014-08-19T16:49:31.446,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/577">>: ok
[ns_server:info,2014-08-19T16:49:31.448,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/576">>: ok
[ns_server:info,2014-08-19T16:49:31.451,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/575">>: ok
[ns_server:info,2014-08-19T16:49:31.453,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/574">>: ok
[ns_server:info,2014-08-19T16:49:31.454,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/573">>: ok
[ns_server:info,2014-08-19T16:49:31.456,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/572">>: ok
[ns_server:info,2014-08-19T16:49:31.458,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/571">>: ok
[ns_server:info,2014-08-19T16:49:31.460,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/570">>: ok
[ns_server:info,2014-08-19T16:49:31.462,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/569">>: ok
[ns_server:info,2014-08-19T16:49:31.463,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/568">>: ok
[ns_server:info,2014-08-19T16:49:31.465,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/567">>: ok
[ns_server:info,2014-08-19T16:49:31.467,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/566">>: ok
[ns_server:info,2014-08-19T16:49:31.469,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/565">>: ok
[ns_server:info,2014-08-19T16:49:31.470,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/564">>: ok
[ns_server:info,2014-08-19T16:49:31.472,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/563">>: ok
[ns_server:info,2014-08-19T16:49:31.474,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/562">>: ok
[ns_server:info,2014-08-19T16:49:31.476,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/561">>: ok
[ns_server:info,2014-08-19T16:49:31.477,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/560">>: ok
[ns_server:info,2014-08-19T16:49:31.479,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/559">>: ok
[ns_server:info,2014-08-19T16:49:31.480,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/558">>: ok
[ns_server:info,2014-08-19T16:49:31.482,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/557">>: ok
[ns_server:info,2014-08-19T16:49:31.484,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/556">>: ok
[ns_server:info,2014-08-19T16:49:31.486,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/555">>: ok
[ns_server:info,2014-08-19T16:49:31.488,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/554">>: ok
[ns_server:info,2014-08-19T16:49:31.489,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/553">>: ok
[ns_server:info,2014-08-19T16:49:31.491,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/552">>: ok
[ns_server:info,2014-08-19T16:49:31.492,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/551">>: ok
[ns_server:info,2014-08-19T16:49:31.494,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/550">>: ok
[ns_server:info,2014-08-19T16:49:31.495,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/549">>: ok
[ns_server:info,2014-08-19T16:49:31.497,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/548">>: ok
[ns_server:info,2014-08-19T16:49:31.498,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/547">>: ok
[ns_server:info,2014-08-19T16:49:31.499,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/546">>: ok
[ns_server:info,2014-08-19T16:49:31.501,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/545">>: ok
[ns_server:info,2014-08-19T16:49:31.502,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/544">>: ok
[ns_server:info,2014-08-19T16:49:31.504,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/543">>: ok
[ns_server:info,2014-08-19T16:49:31.506,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/542">>: ok
[ns_server:info,2014-08-19T16:49:31.508,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/541">>: ok
[ns_server:info,2014-08-19T16:49:31.510,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/540">>: ok
[ns_server:info,2014-08-19T16:49:31.511,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/539">>: ok
[ns_server:info,2014-08-19T16:49:31.513,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/538">>: ok
[ns_server:info,2014-08-19T16:49:31.514,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/537">>: ok
[ns_server:info,2014-08-19T16:49:31.516,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/536">>: ok
[ns_server:info,2014-08-19T16:49:31.517,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/535">>: ok
[ns_server:info,2014-08-19T16:49:31.519,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/534">>: ok
[ns_server:info,2014-08-19T16:49:31.520,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/533">>: ok
[ns_server:info,2014-08-19T16:49:31.522,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/532">>: ok
[ns_server:info,2014-08-19T16:49:31.523,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/531">>: ok
[ns_server:info,2014-08-19T16:49:31.525,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/530">>: ok
[ns_server:info,2014-08-19T16:49:31.527,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/529">>: ok
[ns_server:info,2014-08-19T16:49:31.528,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/528">>: ok
[ns_server:info,2014-08-19T16:49:31.530,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/527">>: ok
[ns_server:info,2014-08-19T16:49:31.531,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/526">>: ok
[ns_server:info,2014-08-19T16:49:31.533,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/525">>: ok
[ns_server:info,2014-08-19T16:49:31.534,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/524">>: ok
[ns_server:info,2014-08-19T16:49:31.535,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/523">>: ok
[ns_server:info,2014-08-19T16:49:31.536,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/522">>: ok
[ns_server:info,2014-08-19T16:49:31.538,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/521">>: ok
[ns_server:info,2014-08-19T16:49:31.539,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/520">>: ok
[ns_server:info,2014-08-19T16:49:31.541,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/519">>: ok
[ns_server:info,2014-08-19T16:49:31.543,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/518">>: ok
[ns_server:info,2014-08-19T16:49:31.544,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/517">>: ok
[ns_server:info,2014-08-19T16:49:31.546,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/516">>: ok
[ns_server:info,2014-08-19T16:49:31.547,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/515">>: ok
[ns_server:info,2014-08-19T16:49:31.549,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/514">>: ok
[ns_server:info,2014-08-19T16:49:31.550,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/513">>: ok
[ns_server:info,2014-08-19T16:49:31.552,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/512">>: ok
[ns_server:info,2014-08-19T16:49:31.553,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/426">>: ok
[ns_server:info,2014-08-19T16:49:31.554,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/425">>: ok
[ns_server:info,2014-08-19T16:49:31.555,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/424">>: ok
[ns_server:info,2014-08-19T16:49:31.556,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/423">>: ok
[ns_server:info,2014-08-19T16:49:31.557,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/422">>: ok
[ns_server:info,2014-08-19T16:49:31.558,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/421">>: ok
[ns_server:info,2014-08-19T16:49:31.560,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/420">>: ok
[ns_server:info,2014-08-19T16:49:31.561,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/419">>: ok
[ns_server:info,2014-08-19T16:49:31.562,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/418">>: ok
[ns_server:info,2014-08-19T16:49:31.564,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/417">>: ok
[ns_server:info,2014-08-19T16:49:31.565,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/416">>: ok
[ns_server:info,2014-08-19T16:49:31.566,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/415">>: ok
[ns_server:info,2014-08-19T16:49:31.567,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/414">>: ok
[ns_server:info,2014-08-19T16:49:31.569,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/413">>: ok
[ns_server:info,2014-08-19T16:49:31.570,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/412">>: ok
[ns_server:info,2014-08-19T16:49:31.571,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/411">>: ok
[ns_server:info,2014-08-19T16:49:31.573,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/410">>: ok
[ns_server:info,2014-08-19T16:49:31.574,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/409">>: ok
[ns_server:info,2014-08-19T16:49:31.575,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/408">>: ok
[ns_server:info,2014-08-19T16:49:31.576,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/407">>: ok
[ns_server:info,2014-08-19T16:49:31.577,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/406">>: ok
[ns_server:info,2014-08-19T16:49:31.578,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/405">>: ok
[ns_server:info,2014-08-19T16:49:31.579,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/404">>: ok
[ns_server:info,2014-08-19T16:49:31.580,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/403">>: ok
[ns_server:info,2014-08-19T16:49:31.582,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/402">>: ok
[ns_server:info,2014-08-19T16:49:31.583,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/401">>: ok
[ns_server:info,2014-08-19T16:49:31.584,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/400">>: ok
[ns_server:info,2014-08-19T16:49:31.585,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/399">>: ok
[ns_server:info,2014-08-19T16:49:31.586,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/398">>: ok
[ns_server:info,2014-08-19T16:49:31.587,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/397">>: ok
[ns_server:info,2014-08-19T16:49:31.588,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/396">>: ok
[ns_server:info,2014-08-19T16:49:31.590,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/395">>: ok
[ns_server:info,2014-08-19T16:49:31.591,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/394">>: ok
[ns_server:info,2014-08-19T16:49:31.593,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/393">>: ok
[ns_server:info,2014-08-19T16:49:31.594,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/392">>: ok
[ns_server:info,2014-08-19T16:49:31.595,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/391">>: ok
[ns_server:info,2014-08-19T16:49:31.596,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/390">>: ok
[ns_server:info,2014-08-19T16:49:31.597,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/389">>: ok
[ns_server:info,2014-08-19T16:49:31.599,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/388">>: ok
[ns_server:info,2014-08-19T16:49:31.600,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/387">>: ok
[ns_server:info,2014-08-19T16:49:31.601,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/386">>: ok
[ns_server:info,2014-08-19T16:49:31.603,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/385">>: ok
[ns_server:info,2014-08-19T16:49:31.604,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/384">>: ok
[ns_server:info,2014-08-19T16:49:31.605,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/383">>: ok
[ns_server:info,2014-08-19T16:49:31.606,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/382">>: ok
[ns_server:info,2014-08-19T16:49:31.607,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/381">>: ok
[ns_server:info,2014-08-19T16:49:31.609,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/380">>: ok
[ns_server:info,2014-08-19T16:49:31.610,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/379">>: ok
[ns_server:info,2014-08-19T16:49:31.611,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/378">>: ok
[ns_server:info,2014-08-19T16:49:31.611,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/377">>: ok
[ns_server:info,2014-08-19T16:49:31.613,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/376">>: ok
[ns_server:info,2014-08-19T16:49:31.614,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/375">>: ok
[ns_server:info,2014-08-19T16:49:31.615,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/374">>: ok
[ns_server:info,2014-08-19T16:49:31.616,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/373">>: ok
[ns_server:info,2014-08-19T16:49:31.618,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/372">>: ok
[ns_server:info,2014-08-19T16:49:31.619,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/371">>: ok
[ns_server:info,2014-08-19T16:49:31.620,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/370">>: ok
[ns_server:info,2014-08-19T16:49:31.621,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/369">>: ok
[ns_server:info,2014-08-19T16:49:31.622,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/368">>: ok
[ns_server:info,2014-08-19T16:49:31.623,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/367">>: ok
[ns_server:info,2014-08-19T16:49:31.624,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/366">>: ok
[ns_server:info,2014-08-19T16:49:31.625,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/365">>: ok
[ns_server:info,2014-08-19T16:49:31.626,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/364">>: ok
[ns_server:info,2014-08-19T16:49:31.628,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/363">>: ok
[ns_server:info,2014-08-19T16:49:31.629,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/362">>: ok
[ns_server:info,2014-08-19T16:49:31.630,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/361">>: ok
[ns_server:info,2014-08-19T16:49:31.631,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/360">>: ok
[ns_server:info,2014-08-19T16:49:31.632,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/359">>: ok
[ns_server:info,2014-08-19T16:49:31.634,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/358">>: ok
[ns_server:info,2014-08-19T16:49:31.635,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/357">>: ok
[ns_server:info,2014-08-19T16:49:31.636,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/356">>: ok
[ns_server:info,2014-08-19T16:49:31.637,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/355">>: ok
[ns_server:info,2014-08-19T16:49:31.638,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/354">>: ok
[ns_server:info,2014-08-19T16:49:31.639,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/353">>: ok
[ns_server:info,2014-08-19T16:49:31.640,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/352">>: ok
[ns_server:info,2014-08-19T16:49:31.641,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/351">>: ok
[ns_server:info,2014-08-19T16:49:31.643,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/350">>: ok
[ns_server:info,2014-08-19T16:49:31.644,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/349">>: ok
[ns_server:info,2014-08-19T16:49:31.645,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/348">>: ok
[ns_server:info,2014-08-19T16:49:31.646,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/347">>: ok
[ns_server:info,2014-08-19T16:49:31.647,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/346">>: ok
[ns_server:info,2014-08-19T16:49:31.648,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/345">>: ok
[ns_server:info,2014-08-19T16:49:31.649,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/344">>: ok
[ns_server:info,2014-08-19T16:49:31.651,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/343">>: ok
[ns_server:info,2014-08-19T16:49:31.652,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/342">>: ok
[ns_server:info,2014-08-19T16:49:31.653,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/170">>: ok
[ns_server:info,2014-08-19T16:49:31.654,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/169">>: ok
[ns_server:info,2014-08-19T16:49:31.655,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/168">>: ok
[ns_server:info,2014-08-19T16:49:31.656,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/167">>: ok
[ns_server:info,2014-08-19T16:49:31.657,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/166">>: ok
[ns_server:info,2014-08-19T16:49:31.658,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/165">>: ok
[ns_server:info,2014-08-19T16:49:31.659,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/164">>: ok
[ns_server:info,2014-08-19T16:49:31.661,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/163">>: ok
[ns_server:info,2014-08-19T16:49:31.662,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/162">>: ok
[ns_server:info,2014-08-19T16:49:31.663,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/161">>: ok
[ns_server:info,2014-08-19T16:49:31.664,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/160">>: ok
[ns_server:info,2014-08-19T16:49:31.665,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/159">>: ok
[ns_server:info,2014-08-19T16:49:31.666,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/158">>: ok
[ns_server:info,2014-08-19T16:49:31.667,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/157">>: ok
[ns_server:info,2014-08-19T16:49:31.668,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/156">>: ok
[ns_server:info,2014-08-19T16:49:31.669,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/155">>: ok
[ns_server:info,2014-08-19T16:49:31.670,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/154">>: ok
[ns_server:info,2014-08-19T16:49:31.671,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/153">>: ok
[ns_server:info,2014-08-19T16:49:31.672,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/152">>: ok
[ns_server:info,2014-08-19T16:49:31.673,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/151">>: ok
[ns_server:info,2014-08-19T16:49:31.674,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/150">>: ok
[ns_server:info,2014-08-19T16:49:31.675,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/149">>: ok
[ns_server:info,2014-08-19T16:49:31.676,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/148">>: ok
[ns_server:info,2014-08-19T16:49:31.677,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/147">>: ok
[ns_server:info,2014-08-19T16:49:31.677,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/146">>: ok
[ns_server:info,2014-08-19T16:49:31.678,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/145">>: ok
[ns_server:info,2014-08-19T16:49:31.679,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/144">>: ok
[ns_server:info,2014-08-19T16:49:31.680,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/143">>: ok
[ns_server:info,2014-08-19T16:49:31.681,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/142">>: ok
[ns_server:info,2014-08-19T16:49:31.682,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/141">>: ok
[ns_server:info,2014-08-19T16:49:31.683,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/140">>: ok
[ns_server:info,2014-08-19T16:49:31.683,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/139">>: ok
[ns_server:info,2014-08-19T16:49:31.684,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/138">>: ok
[ns_server:info,2014-08-19T16:49:31.685,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/137">>: ok
[ns_server:info,2014-08-19T16:49:31.686,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/136">>: ok
[ns_server:info,2014-08-19T16:49:31.687,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/135">>: ok
[ns_server:info,2014-08-19T16:49:31.688,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/134">>: ok
[ns_server:info,2014-08-19T16:49:31.688,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/133">>: ok
[ns_server:info,2014-08-19T16:49:31.689,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/132">>: ok
[ns_server:info,2014-08-19T16:49:31.690,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/131">>: ok
[ns_server:info,2014-08-19T16:49:31.691,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/130">>: ok
[ns_server:info,2014-08-19T16:49:31.691,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/129">>: ok
[ns_server:info,2014-08-19T16:49:31.692,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/128">>: ok
[ns_server:info,2014-08-19T16:49:31.693,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/127">>: ok
[ns_server:info,2014-08-19T16:49:31.694,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/126">>: ok
[ns_server:info,2014-08-19T16:49:31.694,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/125">>: ok
[ns_server:info,2014-08-19T16:49:31.695,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/124">>: ok
[ns_server:info,2014-08-19T16:49:31.696,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/123">>: ok
[ns_server:info,2014-08-19T16:49:31.697,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/122">>: ok
[ns_server:info,2014-08-19T16:49:31.698,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/121">>: ok
[ns_server:info,2014-08-19T16:49:31.699,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/120">>: ok
[ns_server:info,2014-08-19T16:49:31.700,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/119">>: ok
[ns_server:info,2014-08-19T16:49:31.701,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/118">>: ok
[ns_server:info,2014-08-19T16:49:31.702,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/117">>: ok
[ns_server:info,2014-08-19T16:49:31.702,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/116">>: ok
[ns_server:info,2014-08-19T16:49:31.703,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/115">>: ok
[ns_server:info,2014-08-19T16:49:31.704,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/114">>: ok
[ns_server:info,2014-08-19T16:49:31.705,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/113">>: ok
[ns_server:info,2014-08-19T16:49:31.706,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/112">>: ok
[ns_server:info,2014-08-19T16:49:31.707,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/111">>: ok
[ns_server:info,2014-08-19T16:49:31.707,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/110">>: ok
[ns_server:info,2014-08-19T16:49:31.708,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/109">>: ok
[ns_server:info,2014-08-19T16:49:31.709,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/108">>: ok
[ns_server:info,2014-08-19T16:49:31.709,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/107">>: ok
[ns_server:info,2014-08-19T16:49:31.710,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/106">>: ok
[ns_server:info,2014-08-19T16:49:31.711,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/105">>: ok
[ns_server:info,2014-08-19T16:49:31.711,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/104">>: ok
[ns_server:info,2014-08-19T16:49:31.712,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/103">>: ok
[ns_server:info,2014-08-19T16:49:31.713,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1023">>: ok
[ns_server:info,2014-08-19T16:49:31.713,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1022">>: ok
[ns_server:info,2014-08-19T16:49:31.714,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1021">>: ok
[ns_server:info,2014-08-19T16:49:31.715,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1020">>: ok
[ns_server:info,2014-08-19T16:49:31.716,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/102">>: ok
[ns_server:info,2014-08-19T16:49:31.716,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1019">>: ok
[ns_server:info,2014-08-19T16:49:31.717,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1018">>: ok
[ns_server:info,2014-08-19T16:49:31.718,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1017">>: ok
[ns_server:info,2014-08-19T16:49:31.718,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1016">>: ok
[ns_server:info,2014-08-19T16:49:31.719,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1015">>: ok
[ns_server:info,2014-08-19T16:49:31.720,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1014">>: ok
[ns_server:info,2014-08-19T16:49:31.720,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1013">>: ok
[ns_server:info,2014-08-19T16:49:31.721,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1012">>: ok
[ns_server:info,2014-08-19T16:49:31.722,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1011">>: ok
[ns_server:info,2014-08-19T16:49:31.722,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1010">>: ok
[ns_server:info,2014-08-19T16:49:31.723,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/101">>: ok
[ns_server:info,2014-08-19T16:49:31.724,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1009">>: ok
[ns_server:info,2014-08-19T16:49:31.724,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1008">>: ok
[ns_server:info,2014-08-19T16:49:31.725,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1007">>: ok
[ns_server:info,2014-08-19T16:49:31.725,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1006">>: ok
[ns_server:info,2014-08-19T16:49:31.726,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1005">>: ok
[ns_server:info,2014-08-19T16:49:31.727,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1004">>: ok
[ns_server:info,2014-08-19T16:49:31.727,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1003">>: ok
[ns_server:info,2014-08-19T16:49:31.728,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1002">>: ok
[ns_server:info,2014-08-19T16:49:31.728,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1001">>: ok
[ns_server:info,2014-08-19T16:49:31.729,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/1000">>: ok
[ns_server:info,2014-08-19T16:49:31.730,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"tiles/100">>: ok
[ns_server:info,2014-08-19T16:49:31.730,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_databases_and_files:436]Couch dbs are deleted. Proceeding with bucket directory
[ns_server:debug,2014-08-19T16:49:31.730,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:do_delete_bucket_indexes:457]indexes directory doesn't exist already. fine.
[ns_server:info,2014-08-19T16:49:31.733,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/master">>: ok
[ns_server:info,2014-08-19T16:49:31.736,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/999">>: ok
[ns_server:info,2014-08-19T16:49:31.739,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/998">>: ok
[ns_server:info,2014-08-19T16:49:31.741,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/997">>: ok
[ns_server:info,2014-08-19T16:49:31.744,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/996">>: ok
[ns_server:info,2014-08-19T16:49:31.746,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/995">>: ok
[ns_server:info,2014-08-19T16:49:31.751,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/994">>: ok
[ns_server:info,2014-08-19T16:49:31.753,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/993">>: ok
[ns_server:info,2014-08-19T16:49:31.756,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/992">>: ok
[ns_server:info,2014-08-19T16:49:31.758,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/991">>: ok
[ns_server:info,2014-08-19T16:49:31.761,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/990">>: ok
[ns_server:info,2014-08-19T16:49:31.763,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/99">>: ok
[ns_server:info,2014-08-19T16:49:31.766,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/989">>: ok
[ns_server:info,2014-08-19T16:49:31.769,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/988">>: ok
[ns_server:info,2014-08-19T16:49:31.771,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/987">>: ok
[ns_server:info,2014-08-19T16:49:31.773,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/986">>: ok
[ns_server:info,2014-08-19T16:49:31.776,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/985">>: ok
[ns_server:info,2014-08-19T16:49:31.778,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/984">>: ok
[ns_server:info,2014-08-19T16:49:31.780,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/983">>: ok
[ns_server:info,2014-08-19T16:49:31.782,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/982">>: ok
[ns_server:info,2014-08-19T16:49:31.784,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/981">>: ok
[ns_server:info,2014-08-19T16:49:31.786,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/980">>: ok
[ns_server:info,2014-08-19T16:49:31.789,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/98">>: ok
[ns_server:info,2014-08-19T16:49:31.791,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/979">>: ok
[ns_server:info,2014-08-19T16:49:31.793,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/978">>: ok
[ns_server:info,2014-08-19T16:49:31.795,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/977">>: ok
[ns_server:info,2014-08-19T16:49:31.797,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/976">>: ok
[ns_server:info,2014-08-19T16:49:31.799,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/975">>: ok
[ns_server:info,2014-08-19T16:49:31.802,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/974">>: ok
[ns_server:info,2014-08-19T16:49:31.804,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/973">>: ok
[ns_server:info,2014-08-19T16:49:31.806,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/972">>: ok
[ns_server:info,2014-08-19T16:49:31.808,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/971">>: ok
[ns_server:info,2014-08-19T16:49:31.810,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/970">>: ok
[ns_server:info,2014-08-19T16:49:31.812,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/97">>: ok
[ns_server:info,2014-08-19T16:49:31.815,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/969">>: ok
[ns_server:info,2014-08-19T16:49:31.817,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/968">>: ok
[ns_server:info,2014-08-19T16:49:31.819,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/967">>: ok
[ns_server:info,2014-08-19T16:49:31.821,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/966">>: ok
[ns_server:info,2014-08-19T16:49:31.823,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/965">>: ok
[ns_server:info,2014-08-19T16:49:31.825,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/964">>: ok
[ns_server:info,2014-08-19T16:49:31.827,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/963">>: ok
[ns_server:info,2014-08-19T16:49:31.829,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/962">>: ok
[ns_server:info,2014-08-19T16:49:31.832,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/961">>: ok
[ns_server:info,2014-08-19T16:49:31.834,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/960">>: ok
[ns_server:info,2014-08-19T16:49:31.836,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/96">>: ok
[ns_server:info,2014-08-19T16:49:31.838,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/959">>: ok
[ns_server:info,2014-08-19T16:49:31.840,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/958">>: ok
[ns_server:info,2014-08-19T16:49:31.842,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/957">>: ok
[ns_server:info,2014-08-19T16:49:31.844,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/956">>: ok
[ns_server:info,2014-08-19T16:49:31.846,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/955">>: ok
[ns_server:info,2014-08-19T16:49:31.848,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/954">>: ok
[ns_server:info,2014-08-19T16:49:31.850,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/953">>: ok
[ns_server:info,2014-08-19T16:49:31.852,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/952">>: ok
[ns_server:info,2014-08-19T16:49:31.855,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/951">>: ok
[ns_server:info,2014-08-19T16:49:31.857,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/950">>: ok
[ns_server:info,2014-08-19T16:49:31.858,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/95">>: ok
[ns_server:info,2014-08-19T16:49:31.861,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/949">>: ok
[ns_server:info,2014-08-19T16:49:31.862,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/948">>: ok
[ns_server:info,2014-08-19T16:49:31.865,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/947">>: ok
[ns_server:info,2014-08-19T16:49:31.868,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/946">>: ok
[ns_server:info,2014-08-19T16:49:31.870,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/945">>: ok
[ns_server:info,2014-08-19T16:49:31.872,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/944">>: ok
[ns_server:info,2014-08-19T16:49:31.875,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/943">>: ok
[ns_server:info,2014-08-19T16:49:31.877,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/942">>: ok
[ns_server:info,2014-08-19T16:49:31.879,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/941">>: ok
[ns_server:info,2014-08-19T16:49:31.881,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/940">>: ok
[ns_server:info,2014-08-19T16:49:31.883,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/94">>: ok
[ns_server:info,2014-08-19T16:49:31.885,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/939">>: ok
[ns_server:info,2014-08-19T16:49:31.887,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/938">>: ok
[ns_server:info,2014-08-19T16:49:31.889,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/93">>: ok
[ns_server:info,2014-08-19T16:49:31.891,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/92">>: ok
[ns_server:info,2014-08-19T16:49:31.894,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/91">>: ok
[ns_server:info,2014-08-19T16:49:31.895,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/90">>: ok
[ns_server:info,2014-08-19T16:49:31.897,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/89">>: ok
[ns_server:info,2014-08-19T16:49:31.900,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/88">>: ok
[ns_server:info,2014-08-19T16:49:31.903,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/87">>: ok
[ns_server:info,2014-08-19T16:49:31.905,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/86">>: ok
[ns_server:info,2014-08-19T16:49:31.907,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/767">>: ok
[ns_server:info,2014-08-19T16:49:31.909,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/766">>: ok
[ns_server:info,2014-08-19T16:49:31.911,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/765">>: ok
[ns_server:info,2014-08-19T16:49:31.914,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/764">>: ok
[ns_server:info,2014-08-19T16:49:31.916,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/763">>: ok
[ns_server:info,2014-08-19T16:49:31.917,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/762">>: ok
[ns_server:info,2014-08-19T16:49:31.920,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/761">>: ok
[ns_server:info,2014-08-19T16:49:31.922,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/760">>: ok
[ns_server:info,2014-08-19T16:49:31.925,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/759">>: ok
[ns_server:info,2014-08-19T16:49:31.927,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/758">>: ok
[ns_server:info,2014-08-19T16:49:31.929,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/757">>: ok
[ns_server:info,2014-08-19T16:49:31.932,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/756">>: ok
[ns_server:info,2014-08-19T16:49:31.936,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/755">>: ok
[ns_server:info,2014-08-19T16:49:31.938,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/754">>: ok
[ns_server:info,2014-08-19T16:49:31.940,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/753">>: ok
[ns_server:info,2014-08-19T16:49:31.943,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/752">>: ok
[ns_server:info,2014-08-19T16:49:31.945,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/751">>: ok
[ns_server:info,2014-08-19T16:49:31.948,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/750">>: ok
[ns_server:info,2014-08-19T16:49:31.950,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/749">>: ok
[ns_server:info,2014-08-19T16:49:31.952,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/748">>: ok
[ns_server:info,2014-08-19T16:49:31.954,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/747">>: ok
[ns_server:info,2014-08-19T16:49:31.956,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/746">>: ok
[ns_server:info,2014-08-19T16:49:31.958,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/745">>: ok
[ns_server:info,2014-08-19T16:49:31.960,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/744">>: ok
[ns_server:info,2014-08-19T16:49:31.963,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/743">>: ok
[ns_server:info,2014-08-19T16:49:31.965,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/742">>: ok
[ns_server:info,2014-08-19T16:49:31.968,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/741">>: ok
[ns_server:info,2014-08-19T16:49:31.969,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/740">>: ok
[ns_server:info,2014-08-19T16:49:31.971,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/739">>: ok
[ns_server:info,2014-08-19T16:49:31.973,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/738">>: ok
[ns_server:info,2014-08-19T16:49:31.975,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/737">>: ok
[ns_server:info,2014-08-19T16:49:31.977,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/736">>: ok
[ns_server:info,2014-08-19T16:49:31.978,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/735">>: ok
[ns_server:info,2014-08-19T16:49:31.980,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/734">>: ok
[ns_server:info,2014-08-19T16:49:31.982,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/733">>: ok
[ns_server:info,2014-08-19T16:49:31.984,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/732">>: ok
[ns_server:info,2014-08-19T16:49:31.985,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/731">>: ok
[ns_server:info,2014-08-19T16:49:31.987,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/730">>: ok
[ns_server:info,2014-08-19T16:49:31.989,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/729">>: ok
[ns_server:info,2014-08-19T16:49:31.991,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/728">>: ok
[ns_server:info,2014-08-19T16:49:31.993,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/727">>: ok
[ns_server:info,2014-08-19T16:49:31.995,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/726">>: ok
[ns_server:info,2014-08-19T16:49:31.997,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/725">>: ok
[ns_server:info,2014-08-19T16:49:31.999,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/724">>: ok
[ns_server:info,2014-08-19T16:49:32.001,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/723">>: ok
[ns_server:info,2014-08-19T16:49:32.003,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/722">>: ok
[ns_server:info,2014-08-19T16:49:32.005,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/721">>: ok
[ns_server:info,2014-08-19T16:49:32.008,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/720">>: ok
[ns_server:info,2014-08-19T16:49:32.010,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/719">>: ok
[ns_server:info,2014-08-19T16:49:32.012,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/718">>: ok
[ns_server:info,2014-08-19T16:49:32.014,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/717">>: ok
[ns_server:info,2014-08-19T16:49:32.017,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/716">>: ok
[ns_server:info,2014-08-19T16:49:32.019,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/715">>: ok
[ns_server:info,2014-08-19T16:49:32.021,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/714">>: ok
[ns_server:info,2014-08-19T16:49:32.023,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/713">>: ok
[ns_server:info,2014-08-19T16:49:32.026,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/712">>: ok
[ns_server:info,2014-08-19T16:49:32.028,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/711">>: ok
[ns_server:info,2014-08-19T16:49:32.030,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/710">>: ok
[ns_server:info,2014-08-19T16:49:32.032,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/709">>: ok
[ns_server:info,2014-08-19T16:49:32.034,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/708">>: ok
[ns_server:info,2014-08-19T16:49:32.036,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/707">>: ok
[ns_server:info,2014-08-19T16:49:32.038,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/706">>: ok
[ns_server:info,2014-08-19T16:49:32.040,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/705">>: ok
[ns_server:info,2014-08-19T16:49:32.042,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/704">>: ok
[ns_server:info,2014-08-19T16:49:32.044,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/703">>: ok
[ns_server:info,2014-08-19T16:49:32.046,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/702">>: ok
[ns_server:info,2014-08-19T16:49:32.048,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/701">>: ok
[ns_server:info,2014-08-19T16:49:32.050,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/700">>: ok
[ns_server:info,2014-08-19T16:49:32.051,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/699">>: ok
[ns_server:info,2014-08-19T16:49:32.053,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/698">>: ok
[ns_server:info,2014-08-19T16:49:32.055,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/697">>: ok
[ns_server:info,2014-08-19T16:49:32.057,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/696">>: ok
[ns_server:info,2014-08-19T16:49:32.059,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/695">>: ok
[ns_server:info,2014-08-19T16:49:32.061,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/694">>: ok
[ns_server:info,2014-08-19T16:49:32.063,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/693">>: ok
[ns_server:info,2014-08-19T16:49:32.065,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/692">>: ok
[ns_server:info,2014-08-19T16:49:32.067,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/691">>: ok
[ns_server:info,2014-08-19T16:49:32.069,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/690">>: ok
[ns_server:info,2014-08-19T16:49:32.070,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/689">>: ok
[ns_server:info,2014-08-19T16:49:32.072,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/688">>: ok
[ns_server:info,2014-08-19T16:49:32.074,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/687">>: ok
[ns_server:info,2014-08-19T16:49:32.076,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/686">>: ok
[ns_server:info,2014-08-19T16:49:32.078,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/685">>: ok
[ns_server:info,2014-08-19T16:49:32.079,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/684">>: ok
[ns_server:info,2014-08-19T16:49:32.081,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/683">>: ok
[ns_server:info,2014-08-19T16:49:32.084,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/682">>: ok
[ns_server:info,2014-08-19T16:49:32.086,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/681">>: ok
[ns_server:info,2014-08-19T16:49:32.088,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/680">>: ok
[ns_server:info,2014-08-19T16:49:32.091,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/679">>: ok
[ns_server:info,2014-08-19T16:49:32.093,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/678">>: ok
[ns_server:info,2014-08-19T16:49:32.096,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/677">>: ok
[ns_server:info,2014-08-19T16:49:32.098,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/676">>: ok
[ns_server:info,2014-08-19T16:49:32.100,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/675">>: ok
[ns_server:info,2014-08-19T16:49:32.102,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/674">>: ok
[ns_server:info,2014-08-19T16:49:32.104,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/673">>: ok
[ns_server:info,2014-08-19T16:49:32.106,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/672">>: ok
[ns_server:info,2014-08-19T16:49:32.109,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/671">>: ok
[ns_server:info,2014-08-19T16:49:32.111,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/670">>: ok
[ns_server:info,2014-08-19T16:49:32.113,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/669">>: ok
[ns_server:info,2014-08-19T16:49:32.115,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/668">>: ok
[ns_server:info,2014-08-19T16:49:32.118,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/667">>: ok
[ns_server:info,2014-08-19T16:49:32.120,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/666">>: ok
[ns_server:info,2014-08-19T16:49:32.122,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/665">>: ok
[ns_server:info,2014-08-19T16:49:32.124,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/664">>: ok
[ns_server:info,2014-08-19T16:49:32.126,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/663">>: ok
[ns_server:info,2014-08-19T16:49:32.128,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/662">>: ok
[ns_server:info,2014-08-19T16:49:32.131,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/661">>: ok
[ns_server:info,2014-08-19T16:49:32.133,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/660">>: ok
[ns_server:info,2014-08-19T16:49:32.135,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/659">>: ok
[ns_server:info,2014-08-19T16:49:32.137,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/658">>: ok
[ns_server:info,2014-08-19T16:49:32.139,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/657">>: ok
[ns_server:info,2014-08-19T16:49:32.141,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/656">>: ok
[ns_server:info,2014-08-19T16:49:32.143,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/655">>: ok
[ns_server:info,2014-08-19T16:49:32.146,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/654">>: ok
[ns_server:info,2014-08-19T16:49:32.147,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/653">>: ok
[ns_server:info,2014-08-19T16:49:32.149,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/652">>: ok
[ns_server:info,2014-08-19T16:49:32.151,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/651">>: ok
[ns_server:info,2014-08-19T16:49:32.154,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/650">>: ok
[ns_server:info,2014-08-19T16:49:32.156,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/649">>: ok
[ns_server:info,2014-08-19T16:49:32.158,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/648">>: ok
[ns_server:info,2014-08-19T16:49:32.160,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/647">>: ok
[ns_server:info,2014-08-19T16:49:32.162,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/646">>: ok
[ns_server:info,2014-08-19T16:49:32.164,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/645">>: ok
[ns_server:info,2014-08-19T16:49:32.166,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/644">>: ok
[ns_server:info,2014-08-19T16:49:32.168,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/643">>: ok
[ns_server:info,2014-08-19T16:49:32.170,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/642">>: ok
[ns_server:info,2014-08-19T16:49:32.173,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/641">>: ok
[ns_server:info,2014-08-19T16:49:32.174,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/640">>: ok
[ns_server:info,2014-08-19T16:49:32.176,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/639">>: ok
[ns_server:info,2014-08-19T16:49:32.179,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/638">>: ok
[ns_server:info,2014-08-19T16:49:32.180,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/637">>: ok
[ns_server:info,2014-08-19T16:49:32.182,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/636">>: ok
[ns_server:info,2014-08-19T16:49:32.185,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/635">>: ok
[ns_server:info,2014-08-19T16:49:32.186,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/634">>: ok
[ns_server:info,2014-08-19T16:49:32.188,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/633">>: ok
[ns_server:info,2014-08-19T16:49:32.190,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/632">>: ok
[ns_server:info,2014-08-19T16:49:32.192,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/631">>: ok
[ns_server:info,2014-08-19T16:49:32.194,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/630">>: ok
[ns_server:info,2014-08-19T16:49:32.196,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/629">>: ok
[ns_server:info,2014-08-19T16:49:32.198,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/628">>: ok
[ns_server:info,2014-08-19T16:49:32.200,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/627">>: ok
[ns_server:info,2014-08-19T16:49:32.202,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/626">>: ok
[ns_server:info,2014-08-19T16:49:32.204,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/625">>: ok
[ns_server:info,2014-08-19T16:49:32.206,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/624">>: ok
[ns_server:info,2014-08-19T16:49:32.208,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/623">>: ok
[ns_server:info,2014-08-19T16:49:32.210,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/622">>: ok
[ns_server:info,2014-08-19T16:49:32.211,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/621">>: ok
[ns_server:info,2014-08-19T16:49:32.213,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/620">>: ok
[ns_server:info,2014-08-19T16:49:32.215,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/619">>: ok
[ns_server:info,2014-08-19T16:49:32.217,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/618">>: ok
[ns_server:info,2014-08-19T16:49:32.219,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/617">>: ok
[ns_server:info,2014-08-19T16:49:32.220,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/616">>: ok
[ns_server:info,2014-08-19T16:49:32.223,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/615">>: ok
[ns_server:info,2014-08-19T16:49:32.225,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/614">>: ok
[ns_server:info,2014-08-19T16:49:32.227,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/613">>: ok
[ns_server:info,2014-08-19T16:49:32.229,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/612">>: ok
[ns_server:info,2014-08-19T16:49:32.231,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/611">>: ok
[ns_server:info,2014-08-19T16:49:32.233,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/610">>: ok
[ns_server:info,2014-08-19T16:49:32.235,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/609">>: ok
[ns_server:info,2014-08-19T16:49:32.237,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/608">>: ok
[ns_server:info,2014-08-19T16:49:32.239,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/607">>: ok
[ns_server:info,2014-08-19T16:49:32.241,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/606">>: ok
[ns_server:info,2014-08-19T16:49:32.243,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/605">>: ok
[ns_server:info,2014-08-19T16:49:32.244,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/604">>: ok
[ns_server:info,2014-08-19T16:49:32.246,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/603">>: ok
[ns_server:info,2014-08-19T16:49:32.248,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/602">>: ok
[ns_server:info,2014-08-19T16:49:32.250,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/601">>: ok
[ns_server:info,2014-08-19T16:49:32.252,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/600">>: ok
[ns_server:info,2014-08-19T16:49:32.254,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/599">>: ok
[ns_server:info,2014-08-19T16:49:32.255,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/598">>: ok
[ns_server:info,2014-08-19T16:49:32.257,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/597">>: ok
[ns_server:info,2014-08-19T16:49:32.258,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/596">>: ok
[ns_server:info,2014-08-19T16:49:32.260,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/595">>: ok
[ns_server:info,2014-08-19T16:49:32.261,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/594">>: ok
[ns_server:info,2014-08-19T16:49:32.262,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/593">>: ok
[ns_server:info,2014-08-19T16:49:32.264,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/592">>: ok
[ns_server:info,2014-08-19T16:49:32.266,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/591">>: ok
[ns_server:info,2014-08-19T16:49:32.267,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/590">>: ok
[ns_server:info,2014-08-19T16:49:32.269,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/589">>: ok
[ns_server:info,2014-08-19T16:49:32.271,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/588">>: ok
[ns_server:info,2014-08-19T16:49:32.272,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/587">>: ok
[ns_server:info,2014-08-19T16:49:32.273,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/586">>: ok
[ns_server:info,2014-08-19T16:49:32.275,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/585">>: ok
[ns_server:info,2014-08-19T16:49:32.277,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/584">>: ok
[ns_server:info,2014-08-19T16:49:32.278,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/583">>: ok
[ns_server:info,2014-08-19T16:49:32.280,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/582">>: ok
[ns_server:info,2014-08-19T16:49:32.281,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/581">>: ok
[ns_server:info,2014-08-19T16:49:32.282,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/580">>: ok
[ns_server:info,2014-08-19T16:49:32.284,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/579">>: ok
[ns_server:info,2014-08-19T16:49:32.285,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/578">>: ok
[ns_server:info,2014-08-19T16:49:32.287,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/577">>: ok
[ns_server:info,2014-08-19T16:49:32.289,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/576">>: ok
[ns_server:info,2014-08-19T16:49:32.291,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/575">>: ok
[ns_server:info,2014-08-19T16:49:32.292,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/574">>: ok
[ns_server:info,2014-08-19T16:49:32.294,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/573">>: ok
[ns_server:info,2014-08-19T16:49:32.295,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/572">>: ok
[ns_server:info,2014-08-19T16:49:32.297,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/571">>: ok
[ns_server:info,2014-08-19T16:49:32.299,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/570">>: ok
[ns_server:info,2014-08-19T16:49:32.300,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/569">>: ok
[ns_server:info,2014-08-19T16:49:32.302,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/568">>: ok
[ns_server:info,2014-08-19T16:49:32.303,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/567">>: ok
[ns_server:info,2014-08-19T16:49:32.305,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/566">>: ok
[ns_server:info,2014-08-19T16:49:32.306,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/565">>: ok
[ns_server:info,2014-08-19T16:49:32.308,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/564">>: ok
[ns_server:info,2014-08-19T16:49:32.309,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/563">>: ok
[ns_server:info,2014-08-19T16:49:32.310,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/562">>: ok
[ns_server:info,2014-08-19T16:49:32.311,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/561">>: ok
[ns_server:info,2014-08-19T16:49:32.313,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/560">>: ok
[ns_server:info,2014-08-19T16:49:32.314,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/559">>: ok
[ns_server:info,2014-08-19T16:49:32.316,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/558">>: ok
[ns_server:info,2014-08-19T16:49:32.318,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/557">>: ok
[ns_server:info,2014-08-19T16:49:32.319,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/556">>: ok
[ns_server:info,2014-08-19T16:49:32.321,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/555">>: ok
[ns_server:info,2014-08-19T16:49:32.322,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/554">>: ok
[ns_server:info,2014-08-19T16:49:32.324,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/553">>: ok
[ns_server:info,2014-08-19T16:49:32.326,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/552">>: ok
[ns_server:info,2014-08-19T16:49:32.327,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/551">>: ok
[ns_server:info,2014-08-19T16:49:32.329,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/550">>: ok
[ns_server:info,2014-08-19T16:49:32.331,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/549">>: ok
[ns_server:info,2014-08-19T16:49:32.332,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/548">>: ok
[ns_server:info,2014-08-19T16:49:32.334,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/547">>: ok
[ns_server:info,2014-08-19T16:49:32.336,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/546">>: ok
[ns_server:info,2014-08-19T16:49:32.337,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/545">>: ok
[ns_server:info,2014-08-19T16:49:32.339,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/544">>: ok
[ns_server:info,2014-08-19T16:49:32.341,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/543">>: ok
[ns_server:info,2014-08-19T16:49:32.342,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/542">>: ok
[ns_server:info,2014-08-19T16:49:32.344,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/541">>: ok
[ns_server:info,2014-08-19T16:49:32.345,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/540">>: ok
[ns_server:info,2014-08-19T16:49:32.347,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/539">>: ok
[ns_server:info,2014-08-19T16:49:32.349,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/538">>: ok
[ns_server:info,2014-08-19T16:49:32.350,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/537">>: ok
[ns_server:info,2014-08-19T16:49:32.352,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/536">>: ok
[ns_server:info,2014-08-19T16:49:32.354,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/535">>: ok
[ns_server:info,2014-08-19T16:49:32.355,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/534">>: ok
[ns_server:info,2014-08-19T16:49:32.357,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/533">>: ok
[ns_server:info,2014-08-19T16:49:32.358,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/532">>: ok
[ns_server:info,2014-08-19T16:49:32.360,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/531">>: ok
[ns_server:info,2014-08-19T16:49:32.361,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/530">>: ok
[ns_server:info,2014-08-19T16:49:32.363,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/529">>: ok
[ns_server:info,2014-08-19T16:49:32.364,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/528">>: ok
[ns_server:info,2014-08-19T16:49:32.366,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/527">>: ok
[ns_server:info,2014-08-19T16:49:32.367,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/526">>: ok
[ns_server:info,2014-08-19T16:49:32.369,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/525">>: ok
[ns_server:info,2014-08-19T16:49:32.371,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/524">>: ok
[ns_server:info,2014-08-19T16:49:32.372,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/523">>: ok
[ns_server:info,2014-08-19T16:49:32.374,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/522">>: ok
[ns_server:info,2014-08-19T16:49:32.375,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/521">>: ok
[ns_server:info,2014-08-19T16:49:32.377,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/520">>: ok
[ns_server:info,2014-08-19T16:49:32.378,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/519">>: ok
[ns_server:info,2014-08-19T16:49:32.380,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/518">>: ok
[ns_server:info,2014-08-19T16:49:32.381,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/517">>: ok
[ns_server:info,2014-08-19T16:49:32.383,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/516">>: ok
[ns_server:info,2014-08-19T16:49:32.384,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/515">>: ok
[ns_server:info,2014-08-19T16:49:32.386,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/514">>: ok
[ns_server:info,2014-08-19T16:49:32.387,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/513">>: ok
[ns_server:info,2014-08-19T16:49:32.389,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/512">>: ok
[ns_server:info,2014-08-19T16:49:32.390,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/426">>: ok
[ns_server:info,2014-08-19T16:49:32.391,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/425">>: ok
[ns_server:info,2014-08-19T16:49:32.393,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/424">>: ok
[ns_server:info,2014-08-19T16:49:32.394,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/423">>: ok
[ns_server:info,2014-08-19T16:49:32.396,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/422">>: ok
[ns_server:info,2014-08-19T16:49:32.397,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/421">>: ok
[ns_server:info,2014-08-19T16:49:32.399,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/420">>: ok
[ns_server:info,2014-08-19T16:49:32.400,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/419">>: ok
[ns_server:info,2014-08-19T16:49:32.402,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/418">>: ok
[ns_server:info,2014-08-19T16:49:32.403,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/417">>: ok
[ns_server:info,2014-08-19T16:49:32.404,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/416">>: ok
[ns_server:info,2014-08-19T16:49:32.406,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/415">>: ok
[ns_server:info,2014-08-19T16:49:32.407,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/414">>: ok
[ns_server:info,2014-08-19T16:49:32.408,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/413">>: ok
[ns_server:info,2014-08-19T16:49:32.409,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/412">>: ok
[ns_server:info,2014-08-19T16:49:32.411,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/411">>: ok
[ns_server:info,2014-08-19T16:49:32.412,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/410">>: ok
[ns_server:info,2014-08-19T16:49:32.413,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/409">>: ok
[ns_server:info,2014-08-19T16:49:32.414,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/408">>: ok
[ns_server:info,2014-08-19T16:49:32.416,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/407">>: ok
[ns_server:info,2014-08-19T16:49:32.417,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/406">>: ok
[ns_server:info,2014-08-19T16:49:32.419,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/405">>: ok
[ns_server:info,2014-08-19T16:49:32.420,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/404">>: ok
[ns_server:info,2014-08-19T16:49:32.421,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/403">>: ok
[ns_server:info,2014-08-19T16:49:32.423,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/402">>: ok
[ns_server:info,2014-08-19T16:49:32.424,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/401">>: ok
[ns_server:info,2014-08-19T16:49:32.425,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/400">>: ok
[ns_server:info,2014-08-19T16:49:32.427,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/399">>: ok
[ns_server:info,2014-08-19T16:49:32.428,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/398">>: ok
[ns_server:info,2014-08-19T16:49:32.430,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/397">>: ok
[ns_server:info,2014-08-19T16:49:32.431,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/396">>: ok
[ns_server:info,2014-08-19T16:49:32.432,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/395">>: ok
[ns_server:info,2014-08-19T16:49:32.434,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/394">>: ok
[ns_server:info,2014-08-19T16:49:32.435,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/393">>: ok
[ns_server:info,2014-08-19T16:49:32.436,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/392">>: ok
[ns_server:info,2014-08-19T16:49:32.438,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/391">>: ok
[ns_server:info,2014-08-19T16:49:32.439,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/390">>: ok
[ns_server:info,2014-08-19T16:49:32.440,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/389">>: ok
[ns_server:info,2014-08-19T16:49:32.441,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/388">>: ok
[ns_server:info,2014-08-19T16:49:32.443,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/387">>: ok
[ns_server:info,2014-08-19T16:49:32.444,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/386">>: ok
[ns_server:info,2014-08-19T16:49:32.445,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/385">>: ok
[ns_server:info,2014-08-19T16:49:32.446,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/384">>: ok
[ns_server:info,2014-08-19T16:49:32.447,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/383">>: ok
[ns_server:info,2014-08-19T16:49:32.449,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/382">>: ok
[ns_server:info,2014-08-19T16:49:32.450,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/381">>: ok
[ns_server:info,2014-08-19T16:49:32.451,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/380">>: ok
[ns_server:info,2014-08-19T16:49:32.453,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/379">>: ok
[ns_server:info,2014-08-19T16:49:32.454,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/378">>: ok
[ns_server:info,2014-08-19T16:49:32.455,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/377">>: ok
[ns_server:info,2014-08-19T16:49:32.457,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/376">>: ok
[ns_server:info,2014-08-19T16:49:32.458,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/375">>: ok
[ns_server:info,2014-08-19T16:49:32.459,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/374">>: ok
[ns_server:info,2014-08-19T16:49:32.460,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/373">>: ok
[ns_server:info,2014-08-19T16:49:32.462,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/372">>: ok
[ns_server:info,2014-08-19T16:49:32.463,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/371">>: ok
[ns_server:info,2014-08-19T16:49:32.464,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/370">>: ok
[ns_server:info,2014-08-19T16:49:32.465,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/369">>: ok
[ns_server:info,2014-08-19T16:49:32.466,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/368">>: ok
[ns_server:info,2014-08-19T16:49:32.468,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/367">>: ok
[ns_server:info,2014-08-19T16:49:32.469,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/366">>: ok
[ns_server:info,2014-08-19T16:49:32.470,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/365">>: ok
[ns_server:info,2014-08-19T16:49:32.472,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/364">>: ok
[ns_server:info,2014-08-19T16:49:32.473,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/363">>: ok
[ns_server:info,2014-08-19T16:49:32.474,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/362">>: ok
[ns_server:info,2014-08-19T16:49:32.475,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/361">>: ok
[ns_server:info,2014-08-19T16:49:32.477,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/360">>: ok
[ns_server:info,2014-08-19T16:49:32.478,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/359">>: ok
[ns_server:info,2014-08-19T16:49:32.479,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/358">>: ok
[ns_server:info,2014-08-19T16:49:32.480,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/357">>: ok
[ns_server:info,2014-08-19T16:49:32.481,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/356">>: ok
[ns_server:info,2014-08-19T16:49:32.483,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/355">>: ok
[ns_server:info,2014-08-19T16:49:32.484,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/354">>: ok
[ns_server:info,2014-08-19T16:49:32.485,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/353">>: ok
[ns_server:info,2014-08-19T16:49:32.486,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/352">>: ok
[ns_server:info,2014-08-19T16:49:32.487,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/351">>: ok
[ns_server:info,2014-08-19T16:49:32.488,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/350">>: ok
[ns_server:info,2014-08-19T16:49:32.489,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/349">>: ok
[ns_server:info,2014-08-19T16:49:32.490,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/348">>: ok
[ns_server:info,2014-08-19T16:49:32.492,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/347">>: ok
[ns_server:info,2014-08-19T16:49:32.493,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/346">>: ok
[ns_server:info,2014-08-19T16:49:32.494,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/345">>: ok
[ns_server:info,2014-08-19T16:49:32.495,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/344">>: ok
[ns_server:info,2014-08-19T16:49:32.496,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/343">>: ok
[ns_server:info,2014-08-19T16:49:32.497,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/342">>: ok
[ns_server:info,2014-08-19T16:49:32.498,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/170">>: ok
[ns_server:info,2014-08-19T16:49:32.500,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/169">>: ok
[ns_server:info,2014-08-19T16:49:32.501,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/168">>: ok
[ns_server:info,2014-08-19T16:49:32.502,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/167">>: ok
[ns_server:info,2014-08-19T16:49:32.503,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/166">>: ok
[ns_server:info,2014-08-19T16:49:32.504,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/165">>: ok
[ns_server:info,2014-08-19T16:49:32.505,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/164">>: ok
[ns_server:info,2014-08-19T16:49:32.506,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/163">>: ok
[ns_server:info,2014-08-19T16:49:32.507,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/162">>: ok
[ns_server:info,2014-08-19T16:49:32.508,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/161">>: ok
[ns_server:info,2014-08-19T16:49:32.509,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/160">>: ok
[ns_server:info,2014-08-19T16:49:32.510,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/159">>: ok
[ns_server:info,2014-08-19T16:49:32.511,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/158">>: ok
[ns_server:info,2014-08-19T16:49:32.512,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/157">>: ok
[ns_server:info,2014-08-19T16:49:32.513,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/156">>: ok
[ns_server:info,2014-08-19T16:49:32.514,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/155">>: ok
[ns_server:info,2014-08-19T16:49:32.515,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/154">>: ok
[ns_server:info,2014-08-19T16:49:32.516,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/153">>: ok
[ns_server:info,2014-08-19T16:49:32.517,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/152">>: ok
[ns_server:info,2014-08-19T16:49:32.518,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/151">>: ok
[ns_server:info,2014-08-19T16:49:32.519,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/150">>: ok
[ns_server:info,2014-08-19T16:49:32.520,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/149">>: ok
[ns_server:info,2014-08-19T16:49:32.521,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/148">>: ok
[ns_server:info,2014-08-19T16:49:32.522,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/147">>: ok
[ns_server:info,2014-08-19T16:49:32.524,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/146">>: ok
[ns_server:info,2014-08-19T16:49:32.525,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/145">>: ok
[ns_server:info,2014-08-19T16:49:32.525,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/144">>: ok
[ns_server:info,2014-08-19T16:49:32.526,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/143">>: ok
[ns_server:info,2014-08-19T16:49:32.527,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/142">>: ok
[ns_server:info,2014-08-19T16:49:32.528,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/141">>: ok
[ns_server:info,2014-08-19T16:49:32.529,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/140">>: ok
[ns_server:info,2014-08-19T16:49:32.530,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/139">>: ok
[ns_server:info,2014-08-19T16:49:32.531,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/138">>: ok
[ns_server:info,2014-08-19T16:49:32.532,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/137">>: ok
[ns_server:info,2014-08-19T16:49:32.533,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/136">>: ok
[ns_server:info,2014-08-19T16:49:32.534,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/135">>: ok
[ns_server:info,2014-08-19T16:49:32.535,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/134">>: ok
[ns_server:info,2014-08-19T16:49:32.536,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/133">>: ok
[ns_server:info,2014-08-19T16:49:32.537,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/132">>: ok
[ns_server:info,2014-08-19T16:49:32.538,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/131">>: ok
[ns_server:info,2014-08-19T16:49:32.539,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/130">>: ok
[ns_server:info,2014-08-19T16:49:32.540,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/129">>: ok
[ns_server:info,2014-08-19T16:49:32.541,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/128">>: ok
[ns_server:info,2014-08-19T16:49:32.542,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/127">>: ok
[ns_server:info,2014-08-19T16:49:32.543,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/126">>: ok
[ns_server:info,2014-08-19T16:49:32.544,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/125">>: ok
[ns_server:info,2014-08-19T16:49:32.544,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/124">>: ok
[ns_server:info,2014-08-19T16:49:32.546,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/123">>: ok
[ns_server:info,2014-08-19T16:49:32.546,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/122">>: ok
[ns_server:info,2014-08-19T16:49:32.547,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/121">>: ok
[ns_server:info,2014-08-19T16:49:32.548,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/120">>: ok
[ns_server:info,2014-08-19T16:49:32.549,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/119">>: ok
[ns_server:info,2014-08-19T16:49:32.550,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/118">>: ok
[ns_server:info,2014-08-19T16:49:32.551,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/117">>: ok
[ns_server:info,2014-08-19T16:49:32.552,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/116">>: ok
[ns_server:info,2014-08-19T16:49:32.553,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/115">>: ok
[ns_server:info,2014-08-19T16:49:32.553,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/114">>: ok
[ns_server:info,2014-08-19T16:49:32.554,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/113">>: ok
[ns_server:info,2014-08-19T16:49:32.555,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/112">>: ok
[ns_server:info,2014-08-19T16:49:32.556,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/111">>: ok
[ns_server:info,2014-08-19T16:49:32.557,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/110">>: ok
[ns_server:info,2014-08-19T16:49:32.557,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/109">>: ok
[ns_server:info,2014-08-19T16:49:32.558,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/108">>: ok
[ns_server:info,2014-08-19T16:49:32.559,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/107">>: ok
[ns_server:info,2014-08-19T16:49:32.560,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/106">>: ok
[ns_server:info,2014-08-19T16:49:32.561,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/105">>: ok
[ns_server:info,2014-08-19T16:49:32.562,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/104">>: ok
[ns_server:info,2014-08-19T16:49:32.562,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/103">>: ok
[ns_server:info,2014-08-19T16:49:32.563,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1023">>: ok
[ns_server:info,2014-08-19T16:49:32.564,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1022">>: ok
[ns_server:info,2014-08-19T16:49:32.565,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1021">>: ok
[ns_server:info,2014-08-19T16:49:32.566,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1020">>: ok
[ns_server:info,2014-08-19T16:49:32.566,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/102">>: ok
[ns_server:info,2014-08-19T16:49:32.567,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1019">>: ok
[ns_server:info,2014-08-19T16:49:32.568,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1018">>: ok
[ns_server:info,2014-08-19T16:49:32.568,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1017">>: ok
[ns_server:info,2014-08-19T16:49:32.569,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1016">>: ok
[ns_server:info,2014-08-19T16:49:32.570,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1015">>: ok
[ns_server:info,2014-08-19T16:49:32.570,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1014">>: ok
[ns_server:info,2014-08-19T16:49:32.571,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1013">>: ok
[ns_server:info,2014-08-19T16:49:32.572,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1012">>: ok
[ns_server:info,2014-08-19T16:49:32.573,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1011">>: ok
[ns_server:info,2014-08-19T16:49:32.573,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1010">>: ok
[ns_server:info,2014-08-19T16:49:32.574,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/101">>: ok
[ns_server:info,2014-08-19T16:49:32.575,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1009">>: ok
[ns_server:info,2014-08-19T16:49:32.576,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1008">>: ok
[ns_server:info,2014-08-19T16:49:32.576,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1007">>: ok
[ns_server:info,2014-08-19T16:49:32.577,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1006">>: ok
[ns_server:info,2014-08-19T16:49:32.577,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1005">>: ok
[ns_server:info,2014-08-19T16:49:32.578,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1004">>: ok
[ns_server:info,2014-08-19T16:49:32.579,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1003">>: ok
[ns_server:info,2014-08-19T16:49:32.579,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1002">>: ok
[ns_server:info,2014-08-19T16:49:32.580,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1001">>: ok
[ns_server:info,2014-08-19T16:49:32.581,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/1000">>: ok
[ns_server:info,2014-08-19T16:49:32.582,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_couch_database:389]Deleting database <<"default/100">>: ok
[ns_server:info,2014-08-19T16:49:32.582,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:delete_databases_and_files:436]Couch dbs are deleted. Proceeding with bucket directory
[ns_server:debug,2014-08-19T16:49:32.582,ns_1@10.242.238.90:<0.17720.0>:ns_storage_conf:do_delete_bucket_indexes:457]indexes directory doesn't exist already. fine.
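The run of delete_couch_database entries above removes one per-vbucket couch file per line while the old "default" bucket is torn down, then moves on to the bucket directory and (absent) indexes directory. Below is a minimal Erlang sketch of that kind of deletion loop; the module name, on-disk path layout and ".couch.1" suffix are assumptions for illustration, not the actual ns_storage_conf code.

%% Hypothetical sketch: delete every per-vbucket couch file for a bucket.
%% Path layout ("<DataDir>/<Bucket>/<VbId>.couch.1") and module name are
%% assumptions; the real ns_server code differs.
-module(bucket_cleanup_sketch).
-export([delete_vbucket_dbs/3]).

delete_vbucket_dbs(DataDir, Bucket, NumVbuckets) ->
    lists:foreach(
      fun(VbId) ->
              Path = filename:join([DataDir, Bucket,
                                    integer_to_list(VbId) ++ ".couch.1"]),
              Result = case file:delete(Path) of
                           ok              -> ok;
                           {error, enoent} -> ok;   %% already gone
                           {error, Reason} -> {error, Reason}
                       end,
              %% Mirrors the "Deleting database <<"default/NNN">>: ok" lines.
              io:format("Deleting database ~s/~b: ~p~n", [Bucket, VbId, Result])
      end,
      lists:seq(NumVbuckets - 1, 0, -1)).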
[ns_server:info,2014-08-19T16:49:32.729,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:do_pull:341]Pulling config from: 'ns_1@10.242.238.91'
[ns_server:debug,2014-08-19T16:49:32.885,ns_1@10.242.238.90:ns_bucket_worker<0.17558.0>:ns_bucket_sup:update_childs:84]Starting new child: {{per_bucket_sup,"default"},
{single_bucket_sup,start_link,["default"]},
permanent,infinity,supervisor,
[single_bucket_sup]}
[ns_server:debug,2014-08-19T16:49:32.885,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
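The config change above prints the bucket settings as an Erlang proplist. The sketch below shows how individual fields can be read out of such a proplist with the stdlib proplists module; the module and function names are invented for illustration only.

%% Minimal sketch: reading fields from a bucket-config proplist like the one
%% logged above. Only stdlib proplists is used; the module name is made up.
-module(bucket_config_sketch).
-export([summary/1]).

%% summary([{ram_quota,13369344000},{num_vbuckets,1024}]) ->
%%   [{ram_quota,13369344000},{num_replicas,1},{num_vbuckets,1024},{servers,[]}]
summary(BucketConfig) ->
    [{ram_quota,    proplists:get_value(ram_quota,    BucketConfig)},
     {num_replicas, proplists:get_value(num_replicas, BucketConfig, 1)},
     {num_vbuckets, proplists:get_value(num_vbuckets, BucketConfig)},
     {servers,      proplists:get_value(servers,      BucketConfig, [])}].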
[error_logger:info,2014-08-19T16:49:32.887,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.18759.0>},
{name,{per_bucket_sup,"default"}},
{mfargs,{single_bucket_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:32.955,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:init:228]Usable vbuckets:
[]
[ns_server:debug,2014-08-19T16:49:32.955,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:32.955,ns_1@10.242.238.90:ns_memcached-default<0.18773.0>:ns_memcached:init:144]Starting ns_memcached
[error_logger:info,2014-08-19T16:49:32.955,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18761.0>},
{name,{capi_set_view_manager,"default"}},
{mfargs,{capi_set_view_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:32.955,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:32.956,ns_1@10.242.238.90:<0.18774.0>:ns_memcached:run_connect_phase:167]Started 'connecting' phase of ns_memcached-default. Parent is <0.18773.0>
[error_logger:info,2014-08-19T16:49:32.956,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18773.0>},
{name,{ns_memcached,"default"}},
{mfargs,{ns_memcached,start_link,["default"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:32.958,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18775.0>},
{name,{tap_replication_manager,"default"}},
{mfargs,
{tap_replication_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:32.959,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18776.0>},
{name,{ns_vbm_new_sup,"default"}},
{mfargs,{ns_vbm_new_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2014-08-19T16:49:32.961,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18777.0>},
{name,{ns_vbm_sup,"default"}},
{mfargs,{ns_vbm_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[ns_server:debug,2014-08-19T16:49:32.964,ns_1@10.242.238.90:<0.17535.0>:mc_tcp_listener:accept_loop:31]Got new connection
[ns_server:info,2014-08-19T16:49:32.964,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:read_flush_counter:936]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[ns_server:info,2014-08-19T16:49:32.965,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:read_flush_counter_from_config:943]Initialized flushseq 0 from bucket config
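The two janitor_agent entries above show a read-or-default pattern: a small counter file is read at startup, and an {error,enoent} result falls back to the value derived from the bucket config. A hedged sketch of that pattern follows; the on-disk format (a decimal string) and the module name are assumptions.

%% Sketch of the read-or-default pattern behind the flushseq messages above.
-module(flush_counter_sketch).
-export([read_flush_counter/2]).

read_flush_counter(Path, DefaultFromConfig) ->
    case file:read_file(Path) of
        {ok, Bin} ->
            %% Assume the file holds the counter as a decimal string.
            list_to_integer(string:strip(binary_to_list(Bin), both, $\n));
        {error, enoent} ->
            %% Matches "Loading flushseq failed: {error,enoent}. Assuming
            %% it's equal to global config."
            DefaultFromConfig
    end.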
[error_logger:info,2014-08-19T16:49:32.965,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18778.0>},
{name,{janitor_agent,"default"}},
{mfargs,{janitor_agent,start_link,["default"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:32.966,ns_1@10.242.238.90:<0.17535.0>:mc_tcp_listener:accept_loop:33]Passed connection to mc_conn_sup: <0.18779.0>
[error_logger:info,2014-08-19T16:49:32.968,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18780.0>},
{name,{couch_stats_reader,"default"}},
{mfargs,{couch_stats_reader,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:32.968,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18781.0>},
{name,{stats_collector,"default"}},
{mfargs,{stats_collector,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:32.968,ns_1@10.242.238.90:ns_memcached-default<0.18773.0>:ns_memcached:ensure_bucket:1178]Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=13369344000;tap_keepalive=300;dbname=/var/lib/pgsql/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=1024;alog_path=/var/lib/pgsql/default/access.log;data_traffic_enabled=false;max_num_workers=3;uuid=d95ae85dc319bab78fd23c50f6adae2e;vb0=false;waitforwarmup=false;failpartialwarmup=false;"
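The ensure_bucket entry above passes the engine settings as one semicolon-separated key=value string. Below is a purely illustrative parser for strings of that shape; the real parsing happens inside memcached/ep-engine, and the module name here is invented.

%% parse("max_size=13369344000;backend=couchdb;vb0=false;") ->
%%   [{"max_size","13369344000"},{"backend","couchdb"},{"vb0","false"}]
-module(engine_config_sketch).
-export([parse/1]).

parse(ConfigString) ->
    lists:map(fun(Pair) ->
                      case string:tokens(Pair, "=") of
                          [Key, Value] -> {Key, Value};
                          [Key]        -> {Key, ""}   %% key with empty value
                      end
              end,
              string:tokens(ConfigString, ";")).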
[ns_server:info,2014-08-19T16:49:32.969,ns_1@10.242.238.90:ns_memcached-default<0.18773.0>:ns_memcached:handle_cast:609]Main ns_memcached connection established: {ok,#Port<0.13274>}
[ns_server:debug,2014-08-19T16:49:32.969,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[error_logger:info,2014-08-19T16:49:32.970,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18783.0>},
{name,{stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:32.970,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18791.0>},
{name,{stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2014-08-19T16:49:32.970,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18793.0>},
{name,{failover_safeness_level,"default"}},
{mfargs,
{failover_safeness_level,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2014-08-19T16:49:32.971,ns_1@10.242.238.90:ns_memcached-default<0.18773.0>:ns_memcached:handle_cast:632]Bucket "default" loaded on node 'ns_1@10.242.238.90' in 0 seconds.
[error_logger:info,2014-08-19T16:49:32.971,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_sup-default'}
started: [{pid,<0.18794.0>},
{name,{terse_bucket_info_uploader,"default"}},
{mfargs,
{terse_bucket_info_uploader,start_link,
["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:32.983,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:33.159,ns_1@10.242.238.90:ns_heart_slow_status_updater<0.17440.0>:ns_heart:current_status_slow:261]Ignoring failure to get stats for bucket: "default":
{error,no_samples}
[ns_server:debug,2014-08-19T16:49:33.336,ns_1@10.242.238.90:ns_heart_slow_status_updater<0.17440.0>:ns_heart:current_status_slow:261]Ignoring failure to get stats for bucket: "default":
{error,no_samples}
[ns_server:debug,2014-08-19T16:49:33.475,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:handle_info:447]Starting compaction for the following buckets:
[<<"default">>]
[ns_server:info,2014-08-19T16:49:33.476,ns_1@10.242.238.90:<0.18821.0>:compaction_daemon:try_to_cleanup_indexes:650]Cleaning up indexes for bucket `default`
[ns_server:info,2014-08-19T16:49:33.480,ns_1@10.242.238.90:<0.18821.0>:compaction_daemon:spawn_bucket_compactor:609]Compacting bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2014-08-19T16:49:33.481,ns_1@10.242.238.90:<0.18824.0>:compaction_daemon:bucket_needs_compaction:1042]`default` data size is 0, disk size is 34
[ns_server:debug,2014-08-19T16:49:33.481,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:handle_info:505]Finished compaction iteration.
[ns_server:debug,2014-08-19T16:49:33.481,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
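The compaction_daemon entries above compare the bucket's data size (0) and disk size (34) against a 30% fragmentation threshold and then reschedule the next run for 30s. The sketch below shows the basic fragmentation check, assuming the usual (disk - data) / disk ratio; the real daemon also applies guards (for example minimum file sizes) that are not shown here.

%% needs_compaction(0, 34, 30) -> true  (fragmentation would be ~100%)
-module(compaction_check_sketch).
-export([needs_compaction/3]).

needs_compaction(_DataSize, 0, _ThresholdPercent) ->
    false;                                       %% nothing on disk yet
needs_compaction(DataSize, DiskSize, ThresholdPercent) ->
    FragPercent = (DiskSize - DataSize) * 100 / DiskSize,
    FragPercent >= ThresholdPercent.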
[ns_server:info,2014-08-19T16:49:33.936,ns_1@10.242.238.90:ns_memcached-default<0.18773.0>:ns_memcached:handle_call:247]Enabling traffic to bucket "default"
[ns_server:info,2014-08-19T16:49:33.936,ns_1@10.242.238.90:ns_memcached-default<0.18773.0>:ns_memcached:handle_call:251]Bucket "default" marked as warmed in 0 seconds
[ns_server:debug,2014-08-19T16:49:34.000,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:34.000,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:34.004,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
vbucket_map_history ->
[{[['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.89'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88','ns_1@10.242.238.90'],
['ns_1@10.242.238.88'|...],
[...]|...],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]},
{[['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.88'|...],
[...]|...],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]
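The vbucket map history above is logged as lists of per-vbucket node chains ([ActiveNode, ReplicaNode, ...]) indexed by vbucket id starting at 0, together with map options such as {replication_topology,star}. A tiny lookup sketch over one such chain list follows; the module name is an assumption.

-module(vbmap_sketch).
-export([chain_for/2, active_node/2]).

%% Map is a list of chains, one per vbucket, in vbucket-id order.
chain_for(Map, VbId) when VbId >= 0 ->
    lists:nth(VbId + 1, Map).        %% Erlang lists are 1-indexed

active_node(Map, VbId) ->
    hd(chain_for(Map, VbId)).        %% first element of the chain is active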
[ns_server:debug,2014-08-19T16:49:34.010,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:34.011,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:34.016,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[]},
{fastForwardMap,[{0,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{1,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{2,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{3,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{4,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{5,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{6,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{7,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{8,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{9,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{10,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{11,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{12,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{13,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{14,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{15,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{16,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{17,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{18,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{19,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{20,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{21,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{22,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{23,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{24,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{25,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{26,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{27,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{28,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{29,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{30,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{31,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{32,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{33,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{34,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{35,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{36,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{37,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{38,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{39,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{40,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{41,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{42,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{43,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{44,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{45,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{46,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{47,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{48,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{49,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{50,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{51,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{52,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{53,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{54,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{55,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{56,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{57,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{58,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{59,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{60,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{61,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{62,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{63,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{64,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{65,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{66,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{67,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{68,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{69,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{70,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{71,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{72,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{73,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{74,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{75,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{76,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{77,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{78,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{79,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{80,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{81,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{82,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{83,[],
['ns_1@10.242.238.88','ns_1@10.242.238.89']},
{84,[],['ns_1@10.242.238.88'|...]},
{85,[],[...]},
{86,[],...},
{87,...},
{...}|...]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:34.063,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1023 state to replica
[ns_server:info,2014-08-19T16:49:34.091,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1023 to state replica
[ns_server:debug,2014-08-19T16:49:34.138,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1023. Nacking mccouch update.
[views:debug,2014-08-19T16:49:34.138,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1023. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:34.139,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[1023]
[ns_server:debug,2014-08-19T16:49:34.139,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1023,replica,0}
[views:debug,2014-08-19T16:49:34.181,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1023. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:34.181,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1023,replica,0}
[ns_server:debug,2014-08-19T16:49:34.183,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1023_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:34.185,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1023]},
{checkpoints,[{1023,0}]},
{name,<<"replication_building_1023_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1023]},
{takeover,false},
{suffix,"building_1023_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1023,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:34.185,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18843.0>
[rebalance:debug,2014-08-19T16:49:34.185,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:34.186,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.25772.0>,#Ref<16550.0.1.16173>}]}
[rebalance:info,2014-08-19T16:49:34.186,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1023
[rebalance:debug,2014-08-19T16:49:34.186,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.25772.0>,#Ref<16550.0.1.16173>}]
[ns_server:debug,2014-08-19T16:49:34.187,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:34.208,ns_1@10.242.238.90:<0.18844.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1023
[ns_server:info,2014-08-19T16:49:34.214,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 767 state to replica
[ns_server:info,2014-08-19T16:49:34.220,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 767 to state replica
[ns_server:debug,2014-08-19T16:49:34.307,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 767. Nacking mccouch update.
[views:debug,2014-08-19T16:49:34.307,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/767. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:34.307,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",767,pending,0}
[ns_server:debug,2014-08-19T16:49:34.308,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[767,1023]
[ns_server:debug,2014-08-19T16:49:34.325,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_767_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:34.327,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[767]},
{checkpoints,[{767,0}]},
{name,<<"replication_building_767_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[767]},
{takeover,false},
{suffix,"building_767_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",767,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:34.327,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18862.0>
[rebalance:debug,2014-08-19T16:49:34.328,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:34.328,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.25806.0>,#Ref<16550.0.1.18163>}]}
[rebalance:info,2014-08-19T16:49:34.328,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 767
[rebalance:debug,2014-08-19T16:49:34.329,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.25806.0>,#Ref<16550.0.1.18163>}]
[ns_server:debug,2014-08-19T16:49:34.329,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18863.0> (ok)
[ns_server:debug,2014-08-19T16:49:34.329,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:34.331,ns_1@10.242.238.90:<0.18864.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 767
[views:debug,2014-08-19T16:49:34.341,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/767. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:34.341,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",767,pending,0}
[rebalance:debug,2014-08-19T16:49:34.342,ns_1@10.242.238.90:<0.18864.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:34.342,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18864.0> (ok)
[rebalance:debug,2014-08-19T16:49:34.380,ns_1@10.242.238.90:<0.18844.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:34.380,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18844.0> (ok)
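For every vbucket built in this section the same three-step pattern repeats: a janitor_agent waiter process logs "Going to wait for persistence of checkpoint 1 in vbucket N", later logs "Done", and the janitor_agent-default parent acknowledges it with "Got done message from subprocess". A small sketch, again only an assumption-laden reading aid (log text fed on stdin, line format exactly as shown above), that tracks which vbuckets have started but not yet finished that persistence wait:

import re
import sys

# Track janitor_agent persistence waits: remember the vbucket per waiter pid
# when the wait starts, and drop it again when that pid logs "Done".
wait_re = re.compile(
    r"Going to wait for persistence of checkpoint \d+ in vbucket (\d+)")
done_re = re.compile(r":janitor_agent:handle_call:\d+\]Done")
pid_re = re.compile(r"ns_1@[\d.]+:(<\d+\.\d+\.\d+>)")

pending_by_pid = {}  # waiter pid -> vbucket number

for line in sys.stdin:
    pid_m = pid_re.search(line)
    if not pid_m:
        continue
    wait_m = wait_re.search(line)
    if wait_m:
        pending_by_pid[pid_m.group(1)] = int(wait_m.group(1))
    elif done_re.search(line):
        pending_by_pid.pop(pid_m.group(1), None)

print("vbuckets still waiting on persistence:",
      sorted(pending_by_pid.values()))

Against a log where every waiter eventually logs "Done" the final list should come out empty; a non-empty list points at the vbuckets whose checkpoint persistence was still outstanding when the capture ended.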
[ns_server:info,2014-08-19T16:49:34.465,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1022 state to replica
[ns_server:info,2014-08-19T16:49:34.471,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1022 to state replica
[ns_server:debug,2014-08-19T16:49:34.561,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1022_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:34.562,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1022]},
{checkpoints,[{1022,0}]},
{name,<<"replication_building_1022_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1022]},
{takeover,false},
{suffix,"building_1022_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1022,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:34.563,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18882.0>
[rebalance:debug,2014-08-19T16:49:34.563,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:34.563,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.25863.0>,#Ref<16550.0.1.18481>}]}
[rebalance:info,2014-08-19T16:49:34.563,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1022
[rebalance:debug,2014-08-19T16:49:34.564,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.25863.0>,#Ref<16550.0.1.18481>}]
[ns_server:debug,2014-08-19T16:49:34.565,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:34.585,ns_1@10.242.238.90:<0.18883.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1022
[ns_server:info,2014-08-19T16:49:34.591,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 766 state to replica
[ns_server:info,2014-08-19T16:49:34.598,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 766 to state replica
[ns_server:debug,2014-08-19T16:49:34.606,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1022. Nacking mccouch update.
[views:debug,2014-08-19T16:49:34.606,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1022. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:34.606,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1022,replica,0}
[ns_server:debug,2014-08-19T16:49:34.606,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[1022,767,1023]
[views:debug,2014-08-19T16:49:34.673,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1022. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:34.673,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1022,replica,0}
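The mc_connection worker above nacks each mccouch update after writing the _local/vbuuid document and then signals a set_vbucket event carrying the bucket name, vbucket id, new state and checkpoint. Purely as an illustration (assuming the exact "Signaled mc_couch_event" line format in this log and stdin input), those events can be tallied per bucket and target state:

import re
import sys
from collections import Counter

# Count set_vbucket events signalled to mccouch, grouped by bucket and by the
# state each vbucket was moved to (pending, replica, ...).
event_re = re.compile(
    r'Signaled mc_couch_event: \{set_vbucket,"([^"]+)",(\d+),(\w+),\d+\}')

counts = Counter()
for line in sys.stdin:
    m = event_re.search(line)
    if m:
        bucket, state = m.group(1), m.group(3)
        counts[(bucket, state)] += 1

for (bucket, state), n in sorted(counts.items()):
    print(f"{bucket}: {n} set_vbucket event(s) -> {state}")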
[ns_server:debug,2014-08-19T16:49:34.704,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_766_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:34.705,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[766]},
{checkpoints,[{766,0}]},
{name,<<"replication_building_766_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[766]},
{takeover,false},
{suffix,"building_766_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",766,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:34.706,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18887.0>
[rebalance:debug,2014-08-19T16:49:34.706,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:34.706,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.25883.0>,#Ref<16550.0.1.18621>}]}
[rebalance:info,2014-08-19T16:49:34.706,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 766
[rebalance:debug,2014-08-19T16:49:34.707,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.25883.0>,#Ref<16550.0.1.18621>}]
[ns_server:debug,2014-08-19T16:49:34.707,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18888.0> (ok)
[ns_server:debug,2014-08-19T16:49:34.707,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:34.709,ns_1@10.242.238.90:<0.18889.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 766
[ns_server:info,2014-08-19T16:49:34.769,ns_1@10.242.238.90:ns_doctor<0.17441.0>:ns_doctor:update_status:241]The following buckets became ready on node 'ns_1@10.242.238.89': ["default"]
[ns_server:debug,2014-08-19T16:49:34.806,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 766. Nacking mccouch update.
[views:debug,2014-08-19T16:49:34.807,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/766. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:34.807,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",766,pending,0}
[ns_server:debug,2014-08-19T16:49:34.807,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1022,767,1023]
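capi_set_view_manager re-logs the full "Usable vbuckets" set after every change, so the most recent occurrence shows how far replica building has progressed on this node (the list above has grown to four entries). A minimal sketch, assuming the log path is passed as the single command-line argument and that the list format matches this file (including the occasional wrap onto a second line), that reports the latest list:

import re
import sys

# Find every "Usable vbuckets:" list in the log (the bracketed list may span
# more than one line) and report the most recent one, sorted.
text = open(sys.argv[1]).read()
lists = re.findall(r"Usable vbuckets:\s*\[([^\]]*)\]", text)
if lists:
    vbuckets = sorted(int(v) for v in re.findall(r"\d+", lists[-1]))
    print(f"{len(vbuckets)} usable vbuckets, latest list: {vbuckets}")
else:
    print("no 'Usable vbuckets' entries found")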
[ns_server:info,2014-08-19T16:49:34.838,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1021 state to replica
[ns_server:info,2014-08-19T16:49:34.842,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1021 to state replica
[views:debug,2014-08-19T16:49:34.882,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/766. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:34.882,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",766,pending,0}
[rebalance:debug,2014-08-19T16:49:34.884,ns_1@10.242.238.90:<0.18889.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:34.884,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18889.0> (ok)
[ns_server:debug,2014-08-19T16:49:34.932,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1021_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:34.933,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1021]},
{checkpoints,[{1021,0}]},
{name,<<"replication_building_1021_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1021]},
{takeover,false},
{suffix,"building_1021_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1021,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:34.934,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18907.0>
[rebalance:debug,2014-08-19T16:49:34.934,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:34.934,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.25958.0>,#Ref<16550.0.1.19028>}]}
[rebalance:info,2014-08-19T16:49:34.934,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1021
[rebalance:debug,2014-08-19T16:49:34.935,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.25958.0>,#Ref<16550.0.1.19028>}]
[ns_server:debug,2014-08-19T16:49:34.936,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:34.956,ns_1@10.242.238.90:<0.18908.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1021
[ns_server:info,2014-08-19T16:49:34.962,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 765 state to replica
[ns_server:info,2014-08-19T16:49:34.969,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 765 to state replica
[ns_server:debug,2014-08-19T16:49:35.073,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_765_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:35.074,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[765]},
{checkpoints,[{765,0}]},
{name,<<"replication_building_765_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[765]},
{takeover,false},
{suffix,"building_765_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",765,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:35.075,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18926.0>
[rebalance:debug,2014-08-19T16:49:35.075,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:35.075,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.25992.0>,#Ref<16550.0.1.19197>}]}
[rebalance:info,2014-08-19T16:49:35.075,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 765
[rebalance:debug,2014-08-19T16:49:35.076,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.25992.0>,#Ref<16550.0.1.19197>}]
[ns_server:debug,2014-08-19T16:49:35.076,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18927.0> (ok)
[ns_server:debug,2014-08-19T16:49:35.077,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:35.078,ns_1@10.242.238.90:<0.18928.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 765
[ns_server:debug,2014-08-19T16:49:35.099,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1021. Nacking mccouch update.
[views:debug,2014-08-19T16:49:35.099,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1021. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:35.099,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1021,replica,0}
[ns_server:debug,2014-08-19T16:49:35.099,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1022,1021,767,1023]
[views:debug,2014-08-19T16:49:35.175,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1021. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:35.175,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1021,replica,0}
[rebalance:debug,2014-08-19T16:49:35.176,ns_1@10.242.238.90:<0.18883.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:35.176,ns_1@10.242.238.90:<0.18928.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:35.176,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18883.0> (ok)
[ns_server:debug,2014-08-19T16:49:35.176,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18928.0> (ok)
[ns_server:info,2014-08-19T16:49:35.210,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1020 state to replica
[ns_server:info,2014-08-19T16:49:35.214,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1020 to state replica
[ns_server:debug,2014-08-19T16:49:35.303,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1020_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:35.304,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1020]},
{checkpoints,[{1020,0}]},
{name,<<"replication_building_1020_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1020]},
{takeover,false},
{suffix,"building_1020_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1020,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:35.305,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18946.0>
[rebalance:debug,2014-08-19T16:49:35.305,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:35.305,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26054.0>,#Ref<16550.0.1.19522>}]}
[rebalance:info,2014-08-19T16:49:35.306,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1020
[rebalance:debug,2014-08-19T16:49:35.306,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26054.0>,#Ref<16550.0.1.19522>}]
[ns_server:debug,2014-08-19T16:49:35.307,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:35.330,ns_1@10.242.238.90:<0.18947.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 766
[rebalance:debug,2014-08-19T16:49:35.330,ns_1@10.242.238.90:<0.18948.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1020
[rebalance:debug,2014-08-19T16:49:35.331,ns_1@10.242.238.90:<0.18953.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 767
[ns_server:debug,2014-08-19T16:49:35.333,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 765. Nacking mccouch update.
[views:debug,2014-08-19T16:49:35.333,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/765. Updated state: pending (1)
[ns_server:debug,2014-08-19T16:49:35.334,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",765,pending,1}
[ns_server:debug,2014-08-19T16:49:35.334,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1022,765,1021,767,1023]
[ns_server:info,2014-08-19T16:49:35.337,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 764 state to replica
[ns_server:info,2014-08-19T16:49:35.345,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 764 to state replica
[views:debug,2014-08-19T16:49:35.409,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/765. Updated state: pending (1)
[ns_server:debug,2014-08-19T16:49:35.409,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",765,pending,1}
[ns_server:debug,2014-08-19T16:49:35.450,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_764_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:35.451,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[764]},
{checkpoints,[{764,0}]},
{name,<<"replication_building_764_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[764]},
{takeover,false},
{suffix,"building_764_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",764,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:35.452,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18963.0>
[rebalance:debug,2014-08-19T16:49:35.452,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:35.452,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26096.0>,#Ref<16550.0.1.19763>}]}
[rebalance:info,2014-08-19T16:49:35.453,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 764
[rebalance:debug,2014-08-19T16:49:35.453,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26096.0>,#Ref<16550.0.1.19763>}]
[ns_server:debug,2014-08-19T16:49:35.453,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:35.453,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18964.0> (ok)
[rebalance:debug,2014-08-19T16:49:35.455,ns_1@10.242.238.90:<0.18965.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 764
[ns_server:debug,2014-08-19T16:49:35.550,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1020. Nacking mccouch update.
[views:debug,2014-08-19T16:49:35.550,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1020. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:35.550,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1020,replica,0}
[ns_server:debug,2014-08-19T16:49:35.550,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1022,765,1021,767,1020,1023]
[views:debug,2014-08-19T16:49:35.584,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1020. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:35.584,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1020,replica,0}
[ns_server:info,2014-08-19T16:49:35.584,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1019 state to replica
[ns_server:info,2014-08-19T16:49:35.588,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1019 to state replica
[ns_server:debug,2014-08-19T16:49:35.650,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 764. Nacking mccouch update.
[views:debug,2014-08-19T16:49:35.651,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/764. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:35.651,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",764,pending,0}
[ns_server:debug,2014-08-19T16:49:35.651,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1022,765,1021,764,767,1020,1023]
[ns_server:debug,2014-08-19T16:49:35.678,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1019_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:35.679,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1019]},
{checkpoints,[{1019,0}]},
{name,<<"replication_building_1019_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1019]},
{takeover,false},
{suffix,"building_1019_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1019,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:35.680,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.18997.0>
[rebalance:debug,2014-08-19T16:49:35.680,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:35.680,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26154.0>,#Ref<16550.0.1.20108>}]}
[rebalance:info,2014-08-19T16:49:35.680,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1019
[rebalance:debug,2014-08-19T16:49:35.681,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26154.0>,#Ref<16550.0.1.20108>}]
[ns_server:debug,2014-08-19T16:49:35.681,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:35.684,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/764. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:35.685,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",764,pending,0}
[rebalance:debug,2014-08-19T16:49:35.685,ns_1@10.242.238.90:<0.18908.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:35.685,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18908.0> (ok)
[rebalance:debug,2014-08-19T16:49:35.701,ns_1@10.242.238.90:<0.18998.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1019
[ns_server:info,2014-08-19T16:49:35.707,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 763 state to replica
[ns_server:info,2014-08-19T16:49:35.713,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 763 to state replica
[ns_server:debug,2014-08-19T16:49:35.785,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1019. Nacking mccouch update.
[views:debug,2014-08-19T16:49:35.785,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1019. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:35.785,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1019,replica,0}
[ns_server:debug,2014-08-19T16:49:35.785,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1019,1022,765,1021,764,767,1020,1023]
[views:debug,2014-08-19T16:49:35.819,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1019. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:35.819,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1019,replica,0}
[ns_server:debug,2014-08-19T16:49:35.819,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_763_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:35.821,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[763]},
{checkpoints,[{763,0}]},
{name,<<"replication_building_763_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[763]},
{takeover,false},
{suffix,"building_763_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",763,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:35.821,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19016.0>
[rebalance:debug,2014-08-19T16:49:35.822,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:35.822,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26188.0>,#Ref<16550.0.1.20278>}]}
[rebalance:info,2014-08-19T16:49:35.822,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 763
[rebalance:debug,2014-08-19T16:49:35.822,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26188.0>,#Ref<16550.0.1.20278>}]
[ns_server:debug,2014-08-19T16:49:35.823,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19017.0> (ok)
[ns_server:debug,2014-08-19T16:49:35.823,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:35.824,ns_1@10.242.238.90:<0.19018.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 763
[ns_server:debug,2014-08-19T16:49:35.886,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 763. Nacking mccouch update.
[views:debug,2014-08-19T16:49:35.886,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/763. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:35.886,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",763,pending,0}
[ns_server:debug,2014-08-19T16:49:35.886,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1019,1022,765,1021,764,767,1020,1023,763]
[views:debug,2014-08-19T16:49:35.920,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/763. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:35.920,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",763,pending,0}
[rebalance:debug,2014-08-19T16:49:35.920,ns_1@10.242.238.90:<0.18947.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:35.920,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18947.0> (ok)
[rebalance:debug,2014-08-19T16:49:35.921,ns_1@10.242.238.90:<0.18965.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:35.921,ns_1@10.242.238.90:<0.18953.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:35.921,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18965.0> (ok)
[ns_server:debug,2014-08-19T16:49:35.921,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18953.0> (ok)
[ns_server:info,2014-08-19T16:49:35.955,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1018 state to replica
[ns_server:info,2014-08-19T16:49:35.959,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1018 to state replica
[rebalance:debug,2014-08-19T16:49:35.968,ns_1@10.242.238.90:<0.18948.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:35.968,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18948.0> (ok)
[rebalance:debug,2014-08-19T16:49:35.968,ns_1@10.242.238.90:<0.19018.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:35.968,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19018.0> (ok)
[ns_server:debug,2014-08-19T16:49:36.048,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1018_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:36.050,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1018]},
{checkpoints,[{1018,0}]},
{name,<<"replication_building_1018_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1018]},
{takeover,false},
{suffix,"building_1018_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1018,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:36.051,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19036.0>
[rebalance:debug,2014-08-19T16:49:36.051,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:36.051,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26245.0>,#Ref<16550.0.1.20565>}]}
[rebalance:info,2014-08-19T16:49:36.051,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1018
[rebalance:debug,2014-08-19T16:49:36.052,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26245.0>,#Ref<16550.0.1.20565>}]
[ns_server:debug,2014-08-19T16:49:36.052,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:36.071,ns_1@10.242.238.90:<0.19037.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1018
[ns_server:info,2014-08-19T16:49:36.077,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 762 state to replica
[ns_server:info,2014-08-19T16:49:36.083,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 762 to state replica
[ns_server:debug,2014-08-19T16:49:36.190,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_762_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:36.191,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[762]},
{checkpoints,[{762,0}]},
{name,<<"replication_building_762_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[762]},
{takeover,false},
{suffix,"building_762_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",762,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:36.192,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19055.0>
[rebalance:debug,2014-08-19T16:49:36.192,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:36.192,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26265.0>,#Ref<16550.0.1.20719>}]}
[rebalance:info,2014-08-19T16:49:36.193,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 762
[rebalance:debug,2014-08-19T16:49:36.193,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26265.0>,#Ref<16550.0.1.20719>}]
[ns_server:debug,2014-08-19T16:49:36.193,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:36.194,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19056.0> (ok)
[rebalance:debug,2014-08-19T16:49:36.195,ns_1@10.242.238.90:<0.19057.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 762
[ns_server:debug,2014-08-19T16:49:36.209,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1018. Nacking mccouch update.
[views:debug,2014-08-19T16:49:36.210,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1018. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:36.210,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1019,1022,765,1018,1021,764,767,1020,1023,763]
[ns_server:debug,2014-08-19T16:49:36.210,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1018,replica,0}
[views:debug,2014-08-19T16:49:36.293,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1018. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:36.293,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1018,replica,0}
[ns_server:info,2014-08-19T16:49:36.326,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1017 state to replica
[ns_server:info,2014-08-19T16:49:36.329,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1017 to state replica
[rebalance:debug,2014-08-19T16:49:36.361,ns_1@10.242.238.90:<0.18998.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:36.361,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.18998.0> (ok)
[ns_server:debug,2014-08-19T16:49:36.418,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1017_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:36.419,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1017]},
{checkpoints,[{1017,0}]},
{name,<<"replication_building_1017_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1017]},
{takeover,false},
{suffix,"building_1017_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1017,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:36.420,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19061.0>
[rebalance:debug,2014-08-19T16:49:36.420,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:36.421,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26330.0>,#Ref<16550.0.1.21059>}]}
[rebalance:info,2014-08-19T16:49:36.421,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1017
[rebalance:debug,2014-08-19T16:49:36.421,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26330.0>,#Ref<16550.0.1.21059>}]
[ns_server:debug,2014-08-19T16:49:36.422,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:36.443,ns_1@10.242.238.90:<0.19063.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1017
[ns_server:info,2014-08-19T16:49:36.450,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 761 state to replica
[ns_server:info,2014-08-19T16:49:36.455,ns_1@10.242.238.90:ns_doctor<0.17441.0>:ns_doctor:update_status:241]The following buckets became ready on node 'ns_1@10.242.238.91': ["default"]
[ns_server:info,2014-08-19T16:49:36.458,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 761 to state replica
[ns_server:debug,2014-08-19T16:49:36.561,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_761_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:36.562,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[761]},
{checkpoints,[{761,0}]},
{name,<<"replication_building_761_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[761]},
{takeover,false},
{suffix,"building_761_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",761,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:36.563,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19081.0>
[rebalance:debug,2014-08-19T16:49:36.563,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:36.564,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26366.0>,#Ref<16550.0.1.21227>}]}
[rebalance:info,2014-08-19T16:49:36.564,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 761
[rebalance:debug,2014-08-19T16:49:36.564,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26366.0>,#Ref<16550.0.1.21227>}]
[ns_server:debug,2014-08-19T16:49:36.565,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19082.0> (ok)
[ns_server:debug,2014-08-19T16:49:36.565,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:36.566,ns_1@10.242.238.90:<0.19083.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 761
[ns_server:debug,2014-08-19T16:49:36.594,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 762. Nacking mccouch update.
[views:debug,2014-08-19T16:49:36.594,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/762. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:36.594,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1019,1022,762,765,1018,1021,764,767,1020,1023,763]
[ns_server:debug,2014-08-19T16:49:36.594,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",762,pending,0}
[views:debug,2014-08-19T16:49:36.661,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/762. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:36.662,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",762,pending,0}
[ns_server:info,2014-08-19T16:49:36.696,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1016 state to replica
[ns_server:info,2014-08-19T16:49:36.700,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1016 to state replica
[ns_server:debug,2014-08-19T16:49:36.790,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1016_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:36.791,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1016]},
{checkpoints,[{1016,0}]},
{name,<<"replication_building_1016_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1016]},
{takeover,false},
{suffix,"building_1016_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1016,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:36.792,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19087.0>
[rebalance:debug,2014-08-19T16:49:36.792,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:36.792,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26438.0>,#Ref<16550.0.1.21618>}]}
[rebalance:info,2014-08-19T16:49:36.793,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1016
[rebalance:debug,2014-08-19T16:49:36.793,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26438.0>,#Ref<16550.0.1.21618>}]
[ns_server:debug,2014-08-19T16:49:36.794,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:36.814,ns_1@10.242.238.90:<0.19102.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1016
[ns_server:info,2014-08-19T16:49:36.821,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 760 state to replica
[ns_server:info,2014-08-19T16:49:36.829,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 760 to state replica
[ns_server:debug,2014-08-19T16:49:36.887,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1017. Nacking mccouch update.
[views:debug,2014-08-19T16:49:36.887,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1017. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:36.887,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1019,1022,762,765,1018,1021,764,767,1017,1020,1023,763]
[ns_server:debug,2014-08-19T16:49:36.887,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1017,replica,0}
[ns_server:debug,2014-08-19T16:49:36.933,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_760_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:36.935,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[760]},
{checkpoints,[{760,0}]},
{name,<<"replication_building_760_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[760]},
{takeover,false},
{suffix,"building_760_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",760,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:36.936,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19106.0>
[rebalance:debug,2014-08-19T16:49:36.936,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:36.936,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26458.0>,#Ref<16550.0.1.21737>}]}
[rebalance:info,2014-08-19T16:49:36.936,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 760
[rebalance:debug,2014-08-19T16:49:36.937,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26458.0>,#Ref<16550.0.1.21737>}]
[ns_server:debug,2014-08-19T16:49:36.937,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19107.0> (ok)
[ns_server:debug,2014-08-19T16:49:36.937,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:36.939,ns_1@10.242.238.90:<0.19108.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 760
[views:debug,2014-08-19T16:49:36.954,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1017. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:36.954,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1017,replica,0}
[ns_server:info,2014-08-19T16:49:37.068,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1015 state to replica
[ns_server:info,2014-08-19T16:49:37.073,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1015 to state replica
[ns_server:debug,2014-08-19T16:49:37.095,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 761. Nacking mccouch update.
[views:debug,2014-08-19T16:49:37.095,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/761. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:37.095,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",761,pending,0}
[ns_server:debug,2014-08-19T16:49:37.095,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1019,1022,762,765,1018,1021,761,764,767,1017,1020,1023,763]
[views:debug,2014-08-19T16:49:37.129,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/761. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:37.129,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",761,pending,0}
[ns_server:debug,2014-08-19T16:49:37.163,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1015_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:37.164,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1015]},
{checkpoints,[{1015,0}]},
{name,<<"replication_building_1015_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1015]},
{takeover,false},
{suffix,"building_1015_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1015,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:37.165,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19126.0>
[rebalance:debug,2014-08-19T16:49:37.165,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:37.165,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26515.0>,#Ref<16550.0.1.22025>}]}
[rebalance:info,2014-08-19T16:49:37.165,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1015
[rebalance:debug,2014-08-19T16:49:37.166,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26515.0>,#Ref<16550.0.1.22025>}]
[ns_server:debug,2014-08-19T16:49:37.166,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:37.187,ns_1@10.242.238.90:<0.19141.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1015
[ns_server:info,2014-08-19T16:49:37.194,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 759 state to replica
[ns_server:info,2014-08-19T16:49:37.200,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 759 to state replica
[ns_server:debug,2014-08-19T16:49:37.204,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1016. Nacking mccouch update.
[views:debug,2014-08-19T16:49:37.204,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1016. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:37.204,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1016,replica,0}
[ns_server:debug,2014-08-19T16:49:37.204,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1016,1019,1022,762,765,1018,1021,761,764,767,1017,1020,1023,763]
[views:debug,2014-08-19T16:49:37.239,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1016. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:37.239,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1016,replica,0}
[rebalance:debug,2014-08-19T16:49:37.264,ns_1@10.242.238.90:<0.19145.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1023
[ns_server:debug,2014-08-19T16:49:37.305,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 760. Nacking mccouch update.
[views:debug,2014-08-19T16:49:37.305,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/760. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:37.305,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",760,pending,0}
[ns_server:debug,2014-08-19T16:49:37.305,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_759_'ns_1@10.242.238.90'
[ns_server:debug,2014-08-19T16:49:37.305,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1016,1019,1022,762,765,1018,1021,761,764,767,1017,1020,760,1023,763]
[rebalance:info,2014-08-19T16:49:37.306,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[759]},
{checkpoints,[{759,0}]},
{name,<<"replication_building_759_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[759]},
{takeover,false},
{suffix,"building_759_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",759,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:37.307,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19162.0>
[rebalance:debug,2014-08-19T16:49:37.307,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:37.307,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26554.0>,#Ref<16550.0.1.22208>}]}
[rebalance:info,2014-08-19T16:49:37.308,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 759
[rebalance:debug,2014-08-19T16:49:37.308,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26554.0>,#Ref<16550.0.1.22208>}]
[ns_server:debug,2014-08-19T16:49:37.308,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:37.309,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19163.0> (ok)
[rebalance:debug,2014-08-19T16:49:37.310,ns_1@10.242.238.90:<0.19164.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 759
[views:debug,2014-08-19T16:49:37.339,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/760. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:37.339,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",760,pending,0}
[ns_server:debug,2014-08-19T16:49:37.422,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1015. Nacking mccouch update.
[views:debug,2014-08-19T16:49:37.422,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1015. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:37.422,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1015,replica,0}
[ns_server:debug,2014-08-19T16:49:37.422,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1016,1019,1022,762,765,1015,1018,1021,761,764,767,1017,1020,760,1023,763]
[ns_server:info,2014-08-19T16:49:37.440,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1014 state to replica
[ns_server:info,2014-08-19T16:49:37.443,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1014 to state replica
[views:debug,2014-08-19T16:49:37.457,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1015. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:37.457,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1015,replica,0}
[ns_server:debug,2014-08-19T16:49:37.523,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 759. Nacking mccouch update.
[views:debug,2014-08-19T16:49:37.523,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/759. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:37.523,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",759,pending,0}
[ns_server:debug,2014-08-19T16:49:37.523,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1016,1019,759,1022,762,765,1015,1018,1021,761,764,767,1017,1020,760,1023,
763]
[ns_server:debug,2014-08-19T16:49:37.533,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1014_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:37.534,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1014]},
{checkpoints,[{1014,0}]},
{name,<<"replication_building_1014_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1014]},
{takeover,false},
{suffix,"building_1014_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1014,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:37.535,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19202.0>
[rebalance:debug,2014-08-19T16:49:37.535,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:37.535,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26619.0>,#Ref<16550.0.1.22592>}]}
[rebalance:info,2014-08-19T16:49:37.535,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1014
[rebalance:debug,2014-08-19T16:49:37.536,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26619.0>,#Ref<16550.0.1.22592>}]
[ns_server:debug,2014-08-19T16:49:37.536,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:37.556,ns_1@10.242.238.90:<0.19203.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1014
[ns_server:info,2014-08-19T16:49:37.562,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 758 state to replica
[views:debug,2014-08-19T16:49:37.563,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/759. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:37.563,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",759,pending,0}
[rebalance:debug,2014-08-19T16:49:37.564,ns_1@10.242.238.90:<0.19108.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:37.564,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19108.0> (ok)
[ns_server:info,2014-08-19T16:49:37.570,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 758 to state replica
[ns_server:debug,2014-08-19T16:49:37.676,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_758_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:37.678,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[758]},
{checkpoints,[{758,0}]},
{name,<<"replication_building_758_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[758]},
{takeover,false},
{suffix,"building_758_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",758,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:37.678,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19221.0>
[rebalance:debug,2014-08-19T16:49:37.678,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:37.679,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26653.0>,#Ref<16550.0.1.23028>}]}
[rebalance:info,2014-08-19T16:49:37.679,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 758
[rebalance:debug,2014-08-19T16:49:37.679,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26653.0>,#Ref<16550.0.1.23028>}]
[ns_server:debug,2014-08-19T16:49:37.680,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:37.680,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19222.0> (ok)
[rebalance:debug,2014-08-19T16:49:37.682,ns_1@10.242.238.90:<0.19223.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 758
[ns_server:debug,2014-08-19T16:49:37.721,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1014. Nacking mccouch update.
[views:debug,2014-08-19T16:49:37.722,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1014. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:37.722,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1014,replica,0}
[ns_server:debug,2014-08-19T16:49:37.722,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1016,1019,759,1022,762,765,1015,1018,1021,761,764,767,1014,1017,1020,760,
1023,763]
[views:debug,2014-08-19T16:49:37.788,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1014. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:37.789,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1014,replica,0}
[ns_server:info,2014-08-19T16:49:37.813,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1013 state to replica
[ns_server:info,2014-08-19T16:49:37.817,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1013 to state replica
[ns_server:debug,2014-08-19T16:49:37.907,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1013_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:37.908,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1013]},
{checkpoints,[{1013,0}]},
{name,<<"replication_building_1013_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1013]},
{takeover,false},
{suffix,"building_1013_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1013,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:37.909,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19228.0>
[rebalance:debug,2014-08-19T16:49:37.909,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:37.910,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26724.0>,#Ref<16550.0.1.24070>}]}
[rebalance:info,2014-08-19T16:49:37.910,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1013
[rebalance:debug,2014-08-19T16:49:37.910,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26724.0>,#Ref<16550.0.1.24070>}]
[ns_server:debug,2014-08-19T16:49:37.911,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:37.932,ns_1@10.242.238.90:<0.19232.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1013
[ns_server:info,2014-08-19T16:49:37.938,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 757 state to replica
[ns_server:info,2014-08-19T16:49:37.945,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 757 to state replica
[ns_server:debug,2014-08-19T16:49:38.007,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 758. Nacking mccouch update.
[views:debug,2014-08-19T16:49:38.007,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/758. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.007,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",758,replica,0}
[ns_server:debug,2014-08-19T16:49:38.008,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1016,1019,759,1022,762,765,1015,1018,758,1021,761,764,767,1014,1017,1020,
760,1023,763]
[ns_server:debug,2014-08-19T16:49:38.051,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_757_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:38.052,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[757]},
{checkpoints,[{757,0}]},
{name,<<"replication_building_757_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[757]},
{takeover,false},
{suffix,"building_757_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",757,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:38.052,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19247.0>
[rebalance:debug,2014-08-19T16:49:38.053,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:38.053,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26789.0>,#Ref<16550.0.1.24642>}]}
[rebalance:info,2014-08-19T16:49:38.053,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 757
[rebalance:debug,2014-08-19T16:49:38.053,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26789.0>,#Ref<16550.0.1.24642>}]
[ns_server:debug,2014-08-19T16:49:38.054,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:38.054,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19248.0> (ok)
[rebalance:debug,2014-08-19T16:49:38.056,ns_1@10.242.238.90:<0.19249.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 757
[views:debug,2014-08-19T16:49:38.074,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/758. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.075,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",758,replica,0}
[ns_server:info,2014-08-19T16:49:38.188,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1012 state to replica
[ns_server:info,2014-08-19T16:49:38.193,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1012 to state replica
[ns_server:debug,2014-08-19T16:49:38.233,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1013. Nacking mccouch update.
[views:debug,2014-08-19T16:49:38.233,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1013. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.233,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1013,replica,0}
[ns_server:debug,2014-08-19T16:49:38.233,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,1019,759,1022,762,765,1015,1018,758,1021,761,764,767,1014,1017,
1020,760,1023,763]
[ns_server:debug,2014-08-19T16:49:38.283,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1012_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:38.285,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1012]},
{checkpoints,[{1012,0}]},
{name,<<"replication_building_1012_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1012]},
{takeover,false},
{suffix,"building_1012_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1012,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:38.285,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19267.0>
[rebalance:debug,2014-08-19T16:49:38.285,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:38.286,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26846.0>,#Ref<16550.0.1.24941>}]}
[rebalance:info,2014-08-19T16:49:38.286,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1012
[rebalance:debug,2014-08-19T16:49:38.286,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26846.0>,#Ref<16550.0.1.24941>}]
[ns_server:debug,2014-08-19T16:49:38.287,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:38.306,ns_1@10.242.238.90:<0.19268.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1012
[ns_server:info,2014-08-19T16:49:38.313,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 756 state to replica
[views:debug,2014-08-19T16:49:38.317,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1013. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.317,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1013,replica,0}
[ns_server:info,2014-08-19T16:49:38.318,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 756 to state replica
[ns_server:info,2014-08-19T16:49:38.342,ns_1@10.242.238.90:ns_doctor<0.17441.0>:ns_doctor:update_status:241]The following buckets became ready on node 'ns_1@10.242.238.90': ["default"]
[ns_server:debug,2014-08-19T16:49:38.426,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_756_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:38.427,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[756]},
{checkpoints,[{756,0}]},
{name,<<"replication_building_756_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[756]},
{takeover,false},
{suffix,"building_756_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",756,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:38.428,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19294.0>
[rebalance:debug,2014-08-19T16:49:38.428,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:38.428,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26866.0>,#Ref<16550.0.1.25075>}]}
[rebalance:info,2014-08-19T16:49:38.428,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 756
[rebalance:debug,2014-08-19T16:49:38.429,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26866.0>,#Ref<16550.0.1.25075>}]
[ns_server:debug,2014-08-19T16:49:38.429,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19295.0> (ok)
[ns_server:debug,2014-08-19T16:49:38.429,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:38.431,ns_1@10.242.238.90:<0.19298.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 756
[ns_server:debug,2014-08-19T16:49:38.475,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 757. Nacking mccouch update.
[views:debug,2014-08-19T16:49:38.476,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/757. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:38.476,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,1019,759,1022,762,765,1015,1018,758,1021,761,764,767,1014,1017,
757,1020,760,1023,763]
[ns_server:debug,2014-08-19T16:49:38.476,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",757,pending,0}
[views:debug,2014-08-19T16:49:38.548,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/757. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:38.548,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",757,pending,0}
[ns_server:info,2014-08-19T16:49:38.562,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1011 state to replica
[ns_server:info,2014-08-19T16:49:38.566,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1011 to state replica
[ns_server:debug,2014-08-19T16:49:38.616,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1012. Nacking mccouch update.
[views:debug,2014-08-19T16:49:38.617,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1012. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.617,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1012,replica,0}
[ns_server:debug,2014-08-19T16:49:38.617,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,1019,759,1022,762,765,1012,1015,1018,758,1021,761,764,767,1014,
1017,757,1020,760,1023,763]
[views:debug,2014-08-19T16:49:38.650,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1012. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.651,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1012,replica,0}
[ns_server:debug,2014-08-19T16:49:38.656,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1011_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:38.658,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1011]},
{checkpoints,[{1011,0}]},
{name,<<"replication_building_1011_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1011]},
{takeover,false},
{suffix,"building_1011_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1011,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:38.659,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19316.0>
[rebalance:debug,2014-08-19T16:49:38.659,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:38.659,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26924.0>,#Ref<16550.0.1.25407>}]}
[rebalance:info,2014-08-19T16:49:38.659,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1011
[rebalance:debug,2014-08-19T16:49:38.660,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26924.0>,#Ref<16550.0.1.25407>}]
[ns_server:debug,2014-08-19T16:49:38.660,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:38.679,ns_1@10.242.238.90:<0.19317.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1011
[views:debug,2014-08-19T16:49:38.684,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/758. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:38.684,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",758,pending,0}
[ns_server:info,2014-08-19T16:49:38.685,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 755 state to replica
[ns_server:info,2014-08-19T16:49:38.692,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 755 to state replica
[ns_server:debug,2014-08-19T16:49:38.751,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 756. Nacking mccouch update.
[views:debug,2014-08-19T16:49:38.751,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/756. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:38.751,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",756,pending,0}
[ns_server:debug,2014-08-19T16:49:38.751,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,765,1012,1015,1018,758,1021,761,764,767,
1014,1017,757,1020,760,1023,763]
[views:debug,2014-08-19T16:49:38.784,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/756. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:38.785,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",756,pending,0}
[ns_server:debug,2014-08-19T16:49:38.798,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_755_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:38.800,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[755]},
{checkpoints,[{755,0}]},
{name,<<"replication_building_755_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[755]},
{takeover,false},
{suffix,"building_755_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",755,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:38.800,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19335.0>
[rebalance:debug,2014-08-19T16:49:38.800,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:38.801,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.26958.0>,#Ref<16550.0.1.25573>}]}
[rebalance:info,2014-08-19T16:49:38.801,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 755
[rebalance:debug,2014-08-19T16:49:38.801,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.26958.0>,#Ref<16550.0.1.25573>}]
[ns_server:debug,2014-08-19T16:49:38.802,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19336.0> (ok)
[ns_server:debug,2014-08-19T16:49:38.802,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:38.804,ns_1@10.242.238.90:<0.19337.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 755
[ns_server:debug,2014-08-19T16:49:38.868,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1011. Nacking mccouch update.
[views:debug,2014-08-19T16:49:38.868,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1011. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.869,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,765,1012,1015,1018,758,1021,761,764,1011,
767,1014,1017,757,1020,760,1023,763]
[ns_server:debug,2014-08-19T16:49:38.869,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1011,replica,0}
[views:debug,2014-08-19T16:49:38.902,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1011. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:38.902,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1011,replica,0}
[ns_server:info,2014-08-19T16:49:38.936,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1010 state to replica
[ns_server:info,2014-08-19T16:49:38.940,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1010 to state replica
[ns_server:debug,2014-08-19T16:49:38.986,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 755. Nacking mccouch update.
[views:debug,2014-08-19T16:49:38.986,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/755. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:38.986,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",755,pending,0}
[ns_server:debug,2014-08-19T16:49:38.986,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,765,1012,1015,755,1018,758,1021,761,764,
1011,767,1014,1017,757,1020,760,1023,763]
[views:debug,2014-08-19T16:49:39.026,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/755. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:39.026,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",755,pending,0}
[rebalance:debug,2014-08-19T16:49:39.027,ns_1@10.242.238.90:<0.19057.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:39.027,ns_1@10.242.238.90:<0.19337.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:39.027,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19057.0> (ok)
[ns_server:debug,2014-08-19T16:49:39.027,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19337.0> (ok)
[ns_server:debug,2014-08-19T16:49:39.030,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1010_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:39.031,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1010]},
{checkpoints,[{1010,0}]},
{name,<<"replication_building_1010_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1010]},
{takeover,false},
{suffix,"building_1010_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1010,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:39.032,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19369.0>
[rebalance:debug,2014-08-19T16:49:39.032,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:39.032,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27015.0>,#Ref<16550.0.1.25863>}]}
[rebalance:info,2014-08-19T16:49:39.032,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1010
[rebalance:debug,2014-08-19T16:49:39.033,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27015.0>,#Ref<16550.0.1.25863>}]
[ns_server:debug,2014-08-19T16:49:39.034,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:39.053,ns_1@10.242.238.90:<0.19370.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1010
[ns_server:info,2014-08-19T16:49:39.059,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 754 state to replica
[ns_server:info,2014-08-19T16:49:39.068,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 754 to state replica
[ns_server:debug,2014-08-19T16:49:39.174,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_754_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:39.175,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[754]},
{checkpoints,[{754,0}]},
{name,<<"replication_building_754_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[754]},
{takeover,false},
{suffix,"building_754_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",754,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:39.176,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19388.0>
[rebalance:debug,2014-08-19T16:49:39.176,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:39.177,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27049.0>,#Ref<16550.0.1.26050>}]}
[rebalance:info,2014-08-19T16:49:39.177,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 754
[rebalance:debug,2014-08-19T16:49:39.177,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27049.0>,#Ref<16550.0.1.26050>}]
[ns_server:debug,2014-08-19T16:49:39.178,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19389.0> (ok)
[ns_server:debug,2014-08-19T16:49:39.178,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:39.179,ns_1@10.242.238.90:<0.19390.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 754
[ns_server:debug,2014-08-19T16:49:39.184,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1010. Nacking mccouch update.
[views:debug,2014-08-19T16:49:39.185,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1010. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:39.185,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1010,replica,0}
[ns_server:debug,2014-08-19T16:49:39.185,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,765,1012,1015,755,1018,758,1021,761,764,
1011,767,1014,1017,757,1020,760,1023,763,1010]
[views:debug,2014-08-19T16:49:39.260,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1010. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:39.260,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1010,replica,0}
[ns_server:info,2014-08-19T16:49:39.310,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1009 state to replica
[ns_server:info,2014-08-19T16:49:39.314,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1009 to state replica
[ns_server:debug,2014-08-19T16:49:39.404,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1009_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:39.405,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1009]},
{checkpoints,[{1009,0}]},
{name,<<"replication_building_1009_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1009]},
{takeover,false},
{suffix,"building_1009_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1009,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:39.406,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19414.0>
[rebalance:debug,2014-08-19T16:49:39.406,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:39.406,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27125.0>,#Ref<16550.0.1.26484>}]}
[rebalance:info,2014-08-19T16:49:39.407,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1009
[rebalance:debug,2014-08-19T16:49:39.407,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27125.0>,#Ref<16550.0.1.26484>}]
[ns_server:debug,2014-08-19T16:49:39.408,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:39.427,ns_1@10.242.238.90:<0.19415.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1009
[ns_server:info,2014-08-19T16:49:39.433,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 753 state to replica
[ns_server:info,2014-08-19T16:49:39.441,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 753 to state replica
[ns_server:debug,2014-08-19T16:49:39.486,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1009. Nacking mccouch update.
[views:debug,2014-08-19T16:49:39.486,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1009. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:39.486,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,1009,765,1012,1015,755,1018,758,1021,761,
764,1011,767,1014,1017,757,1020,760,1023,763,1010]
[ns_server:debug,2014-08-19T16:49:39.487,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1009,replica,0}
[ns_server:debug,2014-08-19T16:49:39.546,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_753_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:39.548,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[753]},
{checkpoints,[{753,0}]},
{name,<<"replication_building_753_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[753]},
{takeover,false},
{suffix,"building_753_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",753,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:39.548,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19420.0>
[rebalance:debug,2014-08-19T16:49:39.548,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:39.549,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27159.0>,#Ref<16550.0.1.26654>}]}
[rebalance:info,2014-08-19T16:49:39.549,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 753
[rebalance:debug,2014-08-19T16:49:39.549,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27159.0>,#Ref<16550.0.1.26654>}]
[ns_server:debug,2014-08-19T16:49:39.550,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19421.0> (ok)
[ns_server:debug,2014-08-19T16:49:39.550,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:39.552,ns_1@10.242.238.90:<0.19422.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 753
[views:debug,2014-08-19T16:49:39.553,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1009. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:39.553,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1009,replica,0}
[ns_server:info,2014-08-19T16:49:39.685,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1008 state to replica
[ns_server:info,2014-08-19T16:49:39.688,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1008 to state replica
[ns_server:debug,2014-08-19T16:49:39.770,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 754. Nacking mccouch update.
[views:debug,2014-08-19T16:49:39.770,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/754. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:39.770,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",754,pending,0}
[ns_server:debug,2014-08-19T16:49:39.770,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,1009,765,1012,1015,755,1018,758,1021,761,
764,1011,767,1014,754,1017,757,1020,760,1023,763,1010]
[ns_server:debug,2014-08-19T16:49:39.778,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1008_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:39.779,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1008]},
{checkpoints,[{1008,0}]},
{name,<<"replication_building_1008_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1008]},
{takeover,false},
{suffix,"building_1008_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1008,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:39.780,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19440.0>
[rebalance:debug,2014-08-19T16:49:39.780,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:39.781,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27216.0>,#Ref<16550.0.1.26969>}]}
[rebalance:info,2014-08-19T16:49:39.781,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1008
[rebalance:debug,2014-08-19T16:49:39.781,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27216.0>,#Ref<16550.0.1.26969>}]
[ns_server:debug,2014-08-19T16:49:39.782,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:39.802,ns_1@10.242.238.90:<0.19441.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1008
[ns_server:info,2014-08-19T16:49:39.807,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 752 state to replica
[ns_server:info,2014-08-19T16:49:39.814,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 752 to state replica
[views:debug,2014-08-19T16:49:39.837,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/754. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:39.838,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",754,pending,0}
[ns_server:debug,2014-08-19T16:49:39.919,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_752_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:39.920,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[752]},
{checkpoints,[{752,0}]},
{name,<<"replication_building_752_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[752]},
{takeover,false},
{suffix,"building_752_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",752,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:39.921,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19459.0>
[rebalance:debug,2014-08-19T16:49:39.921,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:39.921,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27250.0>,#Ref<16550.0.1.27140>}]}
[rebalance:info,2014-08-19T16:49:39.921,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 752
[rebalance:debug,2014-08-19T16:49:39.922,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27250.0>,#Ref<16550.0.1.27140>}]
[ns_server:debug,2014-08-19T16:49:39.922,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19460.0> (ok)
[ns_server:debug,2014-08-19T16:49:39.922,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:39.924,ns_1@10.242.238.90:<0.19461.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 752
[ns_server:debug,2014-08-19T16:49:39.996,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1008. Nacking mccouch update.
[views:debug,2014-08-19T16:49:39.996,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1008. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:39.996,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1008,replica,0}
[ns_server:debug,2014-08-19T16:49:39.996,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,1009,765,1012,1015,755,1018,758,1021,761,
1008,764,1011,767,1014,754,1017,757,1020,760,1023,763,1010]
[views:debug,2014-08-19T16:49:40.045,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1008. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:40.045,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1008,replica,0}
[ns_server:info,2014-08-19T16:49:40.056,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1007 state to replica
[ns_server:info,2014-08-19T16:49:40.060,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1007 to state replica
[ns_server:debug,2014-08-19T16:49:40.120,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 752. Nacking mccouch update.
[views:debug,2014-08-19T16:49:40.120,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/752. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:40.121,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",752,pending,0}
[ns_server:debug,2014-08-19T16:49:40.121,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,1009,765,1012,752,1015,755,1018,758,1021,
761,1008,764,1011,767,1014,754,1017,757,1020,760,1023,763,1010]
[ns_server:debug,2014-08-19T16:49:40.150,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1007_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:40.152,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1007]},
{checkpoints,[{1007,0}]},
{name,<<"replication_building_1007_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1007]},
{takeover,false},
{suffix,"building_1007_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1007,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:40.153,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19479.0>
[rebalance:debug,2014-08-19T16:49:40.153,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:40.153,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27307.0>,#Ref<16550.0.1.27427>}]}
[rebalance:info,2014-08-19T16:49:40.154,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1007
[rebalance:debug,2014-08-19T16:49:40.154,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27307.0>,#Ref<16550.0.1.27427>}]
[views:debug,2014-08-19T16:49:40.154,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/752. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:40.154,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",752,pending,0}
[ns_server:debug,2014-08-19T16:49:40.155,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:40.174,ns_1@10.242.238.90:<0.19480.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1007
[ns_server:info,2014-08-19T16:49:40.180,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 751 state to replica
[ns_server:info,2014-08-19T16:49:40.187,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 751 to state replica
[ns_server:debug,2014-08-19T16:49:40.229,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1007. Nacking mccouch update.
[views:debug,2014-08-19T16:49:40.229,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1007. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:40.230,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1007,replica,0}
[ns_server:debug,2014-08-19T16:49:40.230,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,1016,756,1019,759,1022,762,1009,765,1012,752,1015,755,1018,758,1021,
761,1008,764,1011,767,1014,754,1017,757,1020,760,1023,1007,763,1010]
[views:debug,2014-08-19T16:49:40.263,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1007. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:40.263,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1007,replica,0}
[ns_server:debug,2014-08-19T16:49:40.295,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_751_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:40.296,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[751]},
{checkpoints,[{751,0}]},
{name,<<"replication_building_751_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[751]},
{takeover,false},
{suffix,"building_751_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",751,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:40.297,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19498.0>
[rebalance:debug,2014-08-19T16:49:40.297,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:40.298,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27327.0>,#Ref<16550.0.1.27579>}]}
[rebalance:info,2014-08-19T16:49:40.298,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 751
[rebalance:debug,2014-08-19T16:49:40.298,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27327.0>,#Ref<16550.0.1.27579>}]
[ns_server:debug,2014-08-19T16:49:40.299,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19499.0> (ok)
[ns_server:debug,2014-08-19T16:49:40.299,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:40.300,ns_1@10.242.238.90:<0.19500.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 751
[ns_server:debug,2014-08-19T16:49:40.338,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 753. Nacking mccouch update.
[views:debug,2014-08-19T16:49:40.339,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/753. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:40.339,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",753,pending,0}
[ns_server:debug,2014-08-19T16:49:40.339,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,753,1016,756,1019,759,1022,762,1009,765,1012,752,1015,755,1018,758,
1021,761,1008,764,1011,767,1014,754,1017,757,1020,760,1023,1007,763,1010]
[views:debug,2014-08-19T16:49:40.397,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/753. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:40.397,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",753,pending,0}
[rebalance:debug,2014-08-19T16:49:40.398,ns_1@10.242.238.90:<0.19203.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:40.398,ns_1@10.242.238.90:<0.19249.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:40.398,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19203.0> (ok)
[ns_server:debug,2014-08-19T16:49:40.398,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19249.0> (ok)
[ns_server:info,2014-08-19T16:49:40.432,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1006 state to replica
[ns_server:info,2014-08-19T16:49:40.438,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1006 to state replica
[ns_server:debug,2014-08-19T16:49:40.529,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1006_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:40.530,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1006]},
{checkpoints,[{1006,0}]},
{name,<<"replication_building_1006_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1006]},
{takeover,false},
{suffix,"building_1006_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1006,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:40.531,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19532.0>
[rebalance:debug,2014-08-19T16:49:40.531,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:40.532,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27384.0>,#Ref<16550.0.1.27882>}]}
[rebalance:info,2014-08-19T16:49:40.532,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1006
[rebalance:debug,2014-08-19T16:49:40.532,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27384.0>,#Ref<16550.0.1.27882>}]
[ns_server:debug,2014-08-19T16:49:40.533,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:40.548,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 751. Nacking mccouch update.
[views:debug,2014-08-19T16:49:40.548,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/751. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:40.548,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,753,1016,756,1019,759,1022,762,1009,765,1012,752,1015,755,1018,758,
1021,761,1008,764,1011,767,751,1014,754,1017,757,1020,760,1023,1007,763,1010]
[ns_server:debug,2014-08-19T16:49:40.548,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",751,pending,0}
[rebalance:debug,2014-08-19T16:49:40.554,ns_1@10.242.238.90:<0.19533.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1006
[ns_server:info,2014-08-19T16:49:40.560,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 750 state to replica
[ns_server:info,2014-08-19T16:49:40.565,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 750 to state replica
[views:debug,2014-08-19T16:49:40.621,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/751. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:40.621,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",751,pending,0}
[ns_server:debug,2014-08-19T16:49:40.672,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_750_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:40.673,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[750]},
{checkpoints,[{750,0}]},
{name,<<"replication_building_750_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[750]},
{takeover,false},
{suffix,"building_750_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",750,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:40.674,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19537.0>
[rebalance:debug,2014-08-19T16:49:40.674,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:40.674,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27418.0>,#Ref<16550.0.1.28074>}]}
[rebalance:info,2014-08-19T16:49:40.675,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 750
[rebalance:debug,2014-08-19T16:49:40.675,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27418.0>,#Ref<16550.0.1.28074>}]
[ns_server:debug,2014-08-19T16:49:40.676,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19538.0> (ok)
[ns_server:debug,2014-08-19T16:49:40.676,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:40.677,ns_1@10.242.238.90:<0.19539.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 750
[ns_server:info,2014-08-19T16:49:40.809,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1005 state to replica
[ns_server:info,2014-08-19T16:49:40.814,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1005 to state replica
[ns_server:debug,2014-08-19T16:49:40.863,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1006. Nacking mccouch update.
[views:debug,2014-08-19T16:49:40.864,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1006. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:40.864,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1006,replica,0}
[ns_server:debug,2014-08-19T16:49:40.864,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,1013,753,1016,756,1019,759,1022,1006,762,1009,765,1012,752,1015,755,1018,
758,1021,761,1008,764,1011,767,751,1014,754,1017,757,1020,760,1023,1007,763,
1010]
[views:debug,2014-08-19T16:49:40.897,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1006. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:40.898,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1006,replica,0}
[ns_server:debug,2014-08-19T16:49:40.905,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1005_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:40.906,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1005]},
{checkpoints,[{1005,0}]},
{name,<<"replication_building_1005_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1005]},
{takeover,false},
{suffix,"building_1005_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1005,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:40.907,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19557.0>
[rebalance:debug,2014-08-19T16:49:40.907,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:40.907,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27489.0>,#Ref<16550.0.1.28458>}]}
[rebalance:info,2014-08-19T16:49:40.907,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1005
[rebalance:debug,2014-08-19T16:49:40.908,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27489.0>,#Ref<16550.0.1.28458>}]
[ns_server:debug,2014-08-19T16:49:40.909,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:40.930,ns_1@10.242.238.90:<0.19558.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1005
[ns_server:info,2014-08-19T16:49:40.936,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 749 state to replica
[ns_server:info,2014-08-19T16:49:40.945,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 749 to state replica
[ns_server:debug,2014-08-19T16:49:40.981,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 750. Nacking mccouch update.
[views:debug,2014-08-19T16:49:40.981,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/750. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:40.981,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",750,pending,0}
[ns_server:debug,2014-08-19T16:49:40.981,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,759,1022,1006,762,1009,765,1012,752,1015,755,
1018,758,1021,761,1008,764,1011,767,751,1014,754,1017,757,1020,760,1023,1007,
763,1010]
[views:debug,2014-08-19T16:49:41.016,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/750. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:41.017,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",750,pending,0}
[ns_server:debug,2014-08-19T16:49:41.050,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_749_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:41.051,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[749]},
{checkpoints,[{749,0}]},
{name,<<"replication_building_749_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[749]},
{takeover,false},
{suffix,"building_749_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",749,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:41.052,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19576.0>
[rebalance:debug,2014-08-19T16:49:41.052,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:41.052,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27523.0>,#Ref<16550.0.1.28647>}]}
[rebalance:info,2014-08-19T16:49:41.053,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 749
[rebalance:debug,2014-08-19T16:49:41.053,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27523.0>,#Ref<16550.0.1.28647>}]
[ns_server:debug,2014-08-19T16:49:41.053,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19577.0> (ok)
[ns_server:debug,2014-08-19T16:49:41.054,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:41.055,ns_1@10.242.238.90:<0.19578.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 749
[ns_server:debug,2014-08-19T16:49:41.100,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1005. Nacking mccouch update.
[views:debug,2014-08-19T16:49:41.100,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1005. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:41.100,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,759,1022,1006,762,1009,765,1012,752,1015,755,
1018,758,1021,1005,761,1008,764,1011,767,751,1014,754,1017,757,1020,760,1023,
1007,763,1010]
[ns_server:debug,2014-08-19T16:49:41.100,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1005,replica,0}
[views:debug,2014-08-19T16:49:41.134,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1005. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:41.134,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1005,replica,0}
[ns_server:info,2014-08-19T16:49:41.189,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1004 state to replica
[ns_server:info,2014-08-19T16:49:41.192,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1004 to state replica
[ns_server:debug,2014-08-19T16:49:41.217,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 749. Nacking mccouch update.
[views:debug,2014-08-19T16:49:41.217,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/749. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:41.217,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",749,pending,0}
[ns_server:debug,2014-08-19T16:49:41.218,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,759,1022,1006,762,1009,765,749,1012,752,1015,
755,1018,758,1021,1005,761,1008,764,1011,767,751,1014,754,1017,757,1020,760,
1023,1007,763,1010]
[ns_server:debug,2014-08-19T16:49:41.283,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1004_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:41.284,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1004]},
{checkpoints,[{1004,0}]},
{name,<<"replication_building_1004_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1004]},
{takeover,false},
{suffix,"building_1004_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1004,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:41.285,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19610.0>
[rebalance:debug,2014-08-19T16:49:41.285,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:41.285,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27599.0>,#Ref<16550.0.1.29026>}]}
[rebalance:info,2014-08-19T16:49:41.286,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1004
[rebalance:debug,2014-08-19T16:49:41.286,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27599.0>,#Ref<16550.0.1.29026>}]
[ns_server:debug,2014-08-19T16:49:41.288,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:41.290,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/749. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:41.290,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",749,pending,0}
[rebalance:debug,2014-08-19T16:49:41.306,ns_1@10.242.238.90:<0.19611.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1004
[ns_server:info,2014-08-19T16:49:41.313,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 748 state to replica
[ns_server:info,2014-08-19T16:49:41.320,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 748 to state replica
[ns_server:debug,2014-08-19T16:49:41.407,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1004. Nacking mccouch update.
[views:debug,2014-08-19T16:49:41.407,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1004. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:41.407,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1004,replica,0}
[ns_server:debug,2014-08-19T16:49:41.407,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,759,1022,1006,762,1009,765,749,1012,752,1015,
755,1018,758,1021,1005,761,1008,764,1011,767,751,1014,754,1017,757,1020,1004,
760,1023,1007,763,1010]
[ns_server:debug,2014-08-19T16:49:41.427,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_748_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:41.428,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[748]},
{checkpoints,[{748,0}]},
{name,<<"replication_building_748_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[748]},
{takeover,false},
{suffix,"building_748_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",748,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:41.429,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19635.0>
[rebalance:debug,2014-08-19T16:49:41.429,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:41.429,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27627.0>,#Ref<16550.0.1.29181>}]}
[rebalance:info,2014-08-19T16:49:41.430,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 748
[rebalance:debug,2014-08-19T16:49:41.430,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27627.0>,#Ref<16550.0.1.29181>}]
[ns_server:debug,2014-08-19T16:49:41.431,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19636.0> (ok)
[ns_server:debug,2014-08-19T16:49:41.432,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:41.432,ns_1@10.242.238.90:<0.19637.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 748
[views:debug,2014-08-19T16:49:41.483,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1004. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:41.483,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1004,replica,0}
[ns_server:info,2014-08-19T16:49:41.562,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1003 state to replica
[ns_server:info,2014-08-19T16:49:41.566,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1003 to state replica
[ns_server:debug,2014-08-19T16:49:41.634,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 748. Nacking mccouch update.
[views:debug,2014-08-19T16:49:41.634,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/748. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:41.634,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",748,pending,0}
[ns_server:debug,2014-08-19T16:49:41.635,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,759,1022,1006,762,1009,765,749,1012,752,1015,
755,1018,758,1021,1005,761,1008,764,748,1011,767,751,1014,754,1017,757,1020,
1004,760,1023,1007,763,1010]
[ns_server:debug,2014-08-19T16:49:41.658,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1003_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:41.659,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1003]},
{checkpoints,[{1003,0}]},
{name,<<"replication_building_1003_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1003]},
{takeover,false},
{suffix,"building_1003_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1003,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:41.660,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19655.0>
[rebalance:debug,2014-08-19T16:49:41.660,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:41.661,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27702.0>,#Ref<16550.0.1.29576>}]}
[rebalance:info,2014-08-19T16:49:41.661,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1003
[rebalance:debug,2014-08-19T16:49:41.661,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27702.0>,#Ref<16550.0.1.29576>}]
[ns_server:debug,2014-08-19T16:49:41.663,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:41.668,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/748. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:41.669,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",748,pending,0}
[rebalance:debug,2014-08-19T16:49:41.669,ns_1@10.242.238.90:<0.19164.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:41.669,ns_1@10.242.238.90:<0.19102.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:41.669,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19164.0> (ok)
[ns_server:debug,2014-08-19T16:49:41.669,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19102.0> (ok)
[rebalance:debug,2014-08-19T16:49:41.680,ns_1@10.242.238.90:<0.19656.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1003
[ns_server:info,2014-08-19T16:49:41.686,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 747 state to replica
[ns_server:info,2014-08-19T16:49:41.692,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 747 to state replica
[ns_server:debug,2014-08-19T16:49:41.799,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_747_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:41.800,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[747]},
{checkpoints,[{747,0}]},
{name,<<"replication_building_747_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[747]},
{takeover,false},
{suffix,"building_747_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",747,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:41.801,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19674.0>
[rebalance:debug,2014-08-19T16:49:41.801,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:41.802,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27722.0>,#Ref<16550.0.1.29697>}]}
[rebalance:info,2014-08-19T16:49:41.802,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 747
[rebalance:debug,2014-08-19T16:49:41.802,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27722.0>,#Ref<16550.0.1.29697>}]
[ns_server:debug,2014-08-19T16:49:41.803,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19675.0> (ok)
[ns_server:debug,2014-08-19T16:49:41.803,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:41.804,ns_1@10.242.238.90:<0.19676.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 747
[ns_server:debug,2014-08-19T16:49:41.810,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1003. Nacking mccouch update.
[views:debug,2014-08-19T16:49:41.810,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1003. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:41.811,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1003,replica,0}
[ns_server:debug,2014-08-19T16:49:41.811,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,1003,759,1022,1006,762,1009,765,749,1012,752,
1015,755,1018,758,1021,1005,761,1008,764,748,1011,767,751,1014,754,1017,757,
1020,1004,760,1023,1007,763,1010]
[views:debug,2014-08-19T16:49:41.878,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1003. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:41.878,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1003,replica,0}
[ns_server:info,2014-08-19T16:49:41.935,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1002 state to replica
[ns_server:info,2014-08-19T16:49:41.939,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1002 to state replica
[ns_server:debug,2014-08-19T16:49:42.030,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1002_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:42.031,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1002]},
{checkpoints,[{1002,0}]},
{name,<<"replication_building_1002_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1002]},
{takeover,false},
{suffix,"building_1002_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1002,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:42.031,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19680.0>
[rebalance:debug,2014-08-19T16:49:42.032,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:42.032,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.27779.0>,#Ref<16550.0.1.29985>}]}
[rebalance:info,2014-08-19T16:49:42.032,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1002
[rebalance:debug,2014-08-19T16:49:42.032,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.27779.0>,#Ref<16550.0.1.29985>}]
[ns_server:debug,2014-08-19T16:49:42.033,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:42.053,ns_1@10.242.238.90:<0.19695.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1002
[ns_server:debug,2014-08-19T16:49:42.084,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1002. Nacking mccouch update.
[views:debug,2014-08-19T16:49:42.084,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1002. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:42.084,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1002,replica,0}
[ns_server:debug,2014-08-19T16:49:42.084,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,1003,759,1022,1006,762,1009,765,749,1012,752,
1015,755,1018,1002,758,1021,1005,761,1008,764,748,1011,767,751,1014,754,1017,
757,1020,1004,760,1023,1007,763,1010]
[views:debug,2014-08-19T16:49:42.118,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1002. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:42.118,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1002,replica,0}
[ns_server:debug,2014-08-19T16:49:42.202,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 747. Nacking mccouch update.
[views:debug,2014-08-19T16:49:42.202,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/747. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:42.202,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",747,pending,0}
[ns_server:debug,2014-08-19T16:49:42.202,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,1003,759,1022,1006,762,1009,765,749,1012,752,
1015,755,1018,1002,758,1021,1005,761,1008,764,748,1011,767,751,1014,754,1017,
757,1020,1004,760,1023,1007,763,747,1010]
[views:debug,2014-08-19T16:49:42.236,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/747. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:42.236,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",747,pending,0}
[rebalance:debug,2014-08-19T16:49:42.237,ns_1@10.242.238.90:<0.19083.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:42.237,ns_1@10.242.238.90:<0.19037.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.237,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19083.0> (ok)
[ns_server:debug,2014-08-19T16:49:42.237,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19037.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.345,ns_1@10.242.238.90:<0.19317.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.345,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19317.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.395,ns_1@10.242.238.90:<0.19232.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.395,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19232.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.446,ns_1@10.242.238.90:<0.19637.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:42.446,ns_1@10.242.238.90:<0.19141.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.446,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19637.0> (ok)
[ns_server:debug,2014-08-19T16:49:42.446,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19141.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.571,ns_1@10.242.238.90:<0.19539.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:42.571,ns_1@10.242.238.90:<0.19063.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.571,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19539.0> (ok)
[ns_server:debug,2014-08-19T16:49:42.571,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19063.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.680,ns_1@10.242.238.90:<0.19461.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:42.680,ns_1@10.242.238.90:<0.19145.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.680,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19461.0> (ok)
[ns_server:debug,2014-08-19T16:49:42.680,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19145.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.755,ns_1@10.242.238.90:<0.19390.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.755,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19390.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.830,ns_1@10.242.238.90:<0.19298.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.830,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19298.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.870,ns_1@10.242.238.90:<0.19223.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.870,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19223.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.920,ns_1@10.242.238.90:<0.19695.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.921,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19695.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.995,ns_1@10.242.238.90:<0.19611.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.996,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19611.0> (ok)
[rebalance:debug,2014-08-19T16:49:42.996,ns_1@10.242.238.90:<0.19676.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:42.996,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19676.0> (ok)
[rebalance:debug,2014-08-19T16:49:43.138,ns_1@10.242.238.90:<0.19578.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:43.138,ns_1@10.242.238.90:<0.19533.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:43.138,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19578.0> (ok)
[ns_server:debug,2014-08-19T16:49:43.138,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19533.0> (ok)
[rebalance:debug,2014-08-19T16:49:43.288,ns_1@10.242.238.90:<0.19441.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:43.288,ns_1@10.242.238.90:<0.19500.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:43.289,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19441.0> (ok)
[ns_server:debug,2014-08-19T16:49:43.289,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19500.0> (ok)
[rebalance:debug,2014-08-19T16:49:43.414,ns_1@10.242.238.90:<0.19422.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:43.414,ns_1@10.242.238.90:<0.19370.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:43.414,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19422.0> (ok)
[ns_server:debug,2014-08-19T16:49:43.414,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19370.0> (ok)
[rebalance:debug,2014-08-19T16:49:43.539,ns_1@10.242.238.90:<0.19268.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:43.539,ns_1@10.242.238.90:<0.19656.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:43.539,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19268.0> (ok)
[ns_server:debug,2014-08-19T16:49:43.539,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19656.0> (ok)
[rebalance:debug,2014-08-19T16:49:43.665,ns_1@10.242.238.90:<0.19558.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:43.665,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19558.0> (ok)
[rebalance:debug,2014-08-19T16:49:43.748,ns_1@10.242.238.90:<0.19480.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:43.748,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19480.0> (ok)
[rebalance:debug,2014-08-19T16:49:43.798,ns_1@10.242.238.90:<0.19415.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:43.798,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19415.0> (ok)
[ns_server:debug,2014-08-19T16:49:44.036,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:44.040,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3563 us
[ns_server:debug,2014-08-19T16:49:44.041,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:44.042,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:44.043,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{511,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:45.121,ns_1@10.242.238.90:<0.19732.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 767)
[ns_server:debug,2014-08-19T16:49:45.121,ns_1@10.242.238.90:<0.19732.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:45.121,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19731.0> (ok)
[rebalance:debug,2014-08-19T16:49:45.122,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:45.122,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:45.122,ns_1@10.242.238.90:<0.19733.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:45.123,ns_1@10.242.238.90:<0.19733.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was succesfully sent
[rebalance:info,2014-08-19T16:49:45.123,ns_1@10.242.238.90:<0.18847.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:49:45.180,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 767 state to active
[ns_server:debug,2014-08-19T16:49:45.199,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:45.203,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:45.203,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2231 us
[ns_server:debug,2014-08-19T16:49:45.204,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{767,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:45.205,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[views:debug,2014-08-19T16:49:45.230,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/767. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:45.230,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",767,active,1}
[rebalance:debug,2014-08-19T16:49:45.250,ns_1@10.242.238.90:<0.19735.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 760
[rebalance:debug,2014-08-19T16:49:45.251,ns_1@10.242.238.90:<0.19735.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:45.251,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19735.0> (ok)
[rebalance:debug,2014-08-19T16:49:45.255,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:45.256,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:45.256,ns_1@10.242.238.90:<0.19738.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:45.256,ns_1@10.242.238.90:<0.19738.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:45.256,ns_1@10.242.238.90:<0.18828.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:49:45.259,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1023 state to replica
[ns_server:info,2014-08-19T16:49:45.260,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:start_child:172]Starting replication from 'ns_1@10.242.238.91' for
[1023]
[error_logger:info,2014-08-19T16:49:45.262,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.19739.0>},
{name,{new_child_id,[1023],'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:45.263,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1023 to state replica
[ns_server:debug,2014-08-19T16:49:45.271,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:45.272,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_ns_1@10.242.238.90
[rebalance:info,2014-08-19T16:49:45.275,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1023]},
{checkpoints,[{1023,1}]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
{{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]}
[rebalance:debug,2014-08-19T16:49:45.276,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19740.0>
[rebalance:info,2014-08-19T16:49:45.279,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:process_upstream:1049]TAP stream is not doing backfill
[ns_server:debug,2014-08-19T16:49:45.281,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 9658 us
[ns_server:debug,2014-08-19T16:49:45.281,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:45.282,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1023,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:45.282,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:49:45.466,ns_1@10.242.238.90:<0.19749.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 762
[rebalance:debug,2014-08-19T16:49:45.467,ns_1@10.242.238.90:<0.19749.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:45.467,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19749.0> (ok)
[rebalance:debug,2014-08-19T16:49:45.616,ns_1@10.242.238.90:<0.19752.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 764
[rebalance:debug,2014-08-19T16:49:45.617,ns_1@10.242.238.90:<0.19752.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:45.617,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19752.0> (ok)
[ns_server:debug,2014-08-19T16:49:45.739,ns_1@10.242.238.90:<0.19756.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 766)
[ns_server:debug,2014-08-19T16:49:45.739,ns_1@10.242.238.90:<0.19756.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:45.739,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19755.0> (ok)
[rebalance:debug,2014-08-19T16:49:45.740,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:45.740,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:45.740,ns_1@10.242.238.90:<0.19757.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:45.740,ns_1@10.242.238.90:<0.19757.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:45.740,ns_1@10.242.238.90:<0.18886.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:49:45.795,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 766 state to active
[rebalance:debug,2014-08-19T16:49:45.817,ns_1@10.242.238.90:<0.19758.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1016
[ns_server:debug,2014-08-19T16:49:45.818,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:45.822,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:45.822,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3522 us
[ns_server:debug,2014-08-19T16:49:45.823,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:45.824,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{766,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:45.847,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/766. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:45.847,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",766,active,1}
[rebalance:debug,2014-08-19T16:49:45.848,ns_1@10.242.238.90:<0.19758.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:45.848,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19758.0> (ok)
[rebalance:debug,2014-08-19T16:49:45.918,ns_1@10.242.238.90:<0.19762.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1018
[rebalance:debug,2014-08-19T16:49:45.919,ns_1@10.242.238.90:<0.19762.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:45.919,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19762.0> (ok)
[rebalance:debug,2014-08-19T16:49:46.055,ns_1@10.242.238.90:<0.19765.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1020
[rebalance:debug,2014-08-19T16:49:46.056,ns_1@10.242.238.90:<0.19765.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:46.056,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19765.0> (ok)
[rebalance:debug,2014-08-19T16:49:46.171,ns_1@10.242.238.90:<0.19768.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1022
[rebalance:debug,2014-08-19T16:49:46.172,ns_1@10.242.238.90:<0.19768.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:46.173,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19768.0> (ok)
[rebalance:debug,2014-08-19T16:49:46.345,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:46.346,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:46.346,ns_1@10.242.238.90:<0.19771.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:46.346,ns_1@10.242.238.90:<0.19771.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:46.346,ns_1@10.242.238.90:<0.18867.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:49:46.351,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1022 state to replica
[ns_server:info,2014-08-19T16:49:46.351,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1022,1023] ([1022], [])
[ns_server:debug,2014-08-19T16:49:46.352,ns_1@10.242.238.90:<0.19772.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,[1022,1023],'ns_1@10.242.238.91'},
#Ref<0.0.0.216691>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:46.353,ns_1@10.242.238.90:<0.19772.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.19739.0>
[ns_server:info,2014-08-19T16:49:46.353,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:46.361,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1022,1},{1023,1}]
[ns_server:info,2014-08-19T16:49:46.361,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:46.361,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19774.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19774.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:46.362,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:46.363,ns_1@10.242.238.90:<0.19739.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:46.363,ns_1@10.242.238.90:<0.19772.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.19739.0>
[ns_server:debug,2014-08-19T16:49:46.363,ns_1@10.242.238.90:<0.19772.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:46.363,ns_1@10.242.238.90:<0.19776.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:46.363,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.19739.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.19740.0>,<<"cut off">>,<<"cut off">>,[],7,false,false,0,
{1408,452586,361941},
completed,
{<0.19772.0>,#Ref<0.0.0.216704>},
<<"replication_ns_1@10.242.238.90">>,<0.19739.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:46.363,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.19772.0>,{#Ref<0.0.0.216693>,<0.19776.0>}}
[error_logger:info,2014-08-19T16:49:46.363,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.19776.0>},
{name,{new_child_id,[1022,1023],'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:46.367,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:46.367,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19777.0>
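Note on the sequence above (register under the new child id, link to the old ebucketmigrator, change the tap filter, silence the upstream sender, confirm the downstream socket, pass the old state, start the new child, reuse the old upstream): this is the old-to-new migrator handoff driven by ns_vbm_new_sup:do_perform_vbucket_filter_change and ebucketmigrator_srv, as named in the log lines. The schematic below uses invented function and message names and is not the actual ns_server API.

    %% Schematic only -- invented names, not the real ns_server code.
    handoff_to_new_migrator(OldPid, NewVBuckets, StartNewFun) ->
        link(OldPid),                                   % "Linked myself to old ebucketmigrator"
        %% The old process changes its tap filter, silences its upstream
        %% sender, confirms the downstream is drained, replies with its
        %% state and then dies ("Sent out state. Preparing to die").
        OldState = gen_server:call(OldPid, {change_vbucket_filter, NewVBuckets}, infinity),
        Retriever = fun () -> OldState end,             % handed to the new child as old_state_retriever
        {ok, NewPid} = StartNewFun(Retriever),          % supervisor starts the new ebucketmigrator
        {ok, NewPid}.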
[ns_server:debug,2014-08-19T16:49:46.372,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:46.376,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:46.376,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4198 us
[ns_server:debug,2014-08-19T16:49:46.377,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1022,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:46.378,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:49:46.490,ns_1@10.242.238.90:<0.19779.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 747
[rebalance:debug,2014-08-19T16:49:46.492,ns_1@10.242.238.90:<0.19779.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:46.492,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19779.0> (ok)
[rebalance:debug,2014-08-19T16:49:46.591,ns_1@10.242.238.90:<0.19782.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 749
[rebalance:debug,2014-08-19T16:49:46.592,ns_1@10.242.238.90:<0.19782.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:46.592,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19782.0> (ok)
[rebalance:debug,2014-08-19T16:49:46.691,ns_1@10.242.238.90:<0.19785.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 751
[rebalance:debug,2014-08-19T16:49:46.692,ns_1@10.242.238.90:<0.19785.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:46.693,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19785.0> (ok)
[rebalance:debug,2014-08-19T16:49:46.791,ns_1@10.242.238.90:<0.19788.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 753
[rebalance:debug,2014-08-19T16:49:46.793,ns_1@10.242.238.90:<0.19788.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:46.793,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19788.0> (ok)
[rebalance:debug,2014-08-19T16:49:46.901,ns_1@10.242.238.90:<0.19791.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 755
[rebalance:debug,2014-08-19T16:49:46.903,ns_1@10.242.238.90:<0.19791.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:46.903,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19791.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.035,ns_1@10.242.238.90:<0.19794.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 757
[rebalance:debug,2014-08-19T16:49:47.037,ns_1@10.242.238.90:<0.19794.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.037,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19794.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.112,ns_1@10.242.238.90:<0.19797.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 759
[rebalance:debug,2014-08-19T16:49:47.114,ns_1@10.242.238.90:<0.19797.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.114,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19797.0> (ok)
[ns_server:debug,2014-08-19T16:49:47.116,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:47.119,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.120,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4403 us
[ns_server:debug,2014-08-19T16:49:47.121,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.121,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{504,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:47.180,ns_1@10.242.238.90:<0.19801.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 761
[rebalance:debug,2014-08-19T16:49:47.182,ns_1@10.242.238.90:<0.19801.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.182,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19801.0> (ok)
[ns_server:debug,2014-08-19T16:49:47.218,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.218,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:47.218,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 23 us
[ns_server:debug,2014-08-19T16:49:47.219,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.219,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{506,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:47.227,ns_1@10.242.238.90:<0.19805.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 763
[rebalance:debug,2014-08-19T16:49:47.228,ns_1@10.242.238.90:<0.19805.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.228,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19805.0> (ok)
[ns_server:debug,2014-08-19T16:49:47.269,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:49:47.270,ns_1@10.242.238.90:<0.19808.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 748
[rebalance:debug,2014-08-19T16:49:47.270,ns_1@10.242.238.90:<0.19811.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 765
[rebalance:debug,2014-08-19T16:49:47.271,ns_1@10.242.238.90:<0.19808.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.271,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19808.0> (ok)
[ns_server:debug,2014-08-19T16:49:47.273,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.273,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3144 us
[rebalance:debug,2014-08-19T16:49:47.273,ns_1@10.242.238.90:<0.19811.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.274,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19811.0> (ok)
[ns_server:debug,2014-08-19T16:49:47.274,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{508,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:47.274,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.316,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:47.324,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.325,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 8106 us
[ns_server:debug,2014-08-19T16:49:47.325,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.326,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{510,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:47.353,ns_1@10.242.238.90:<0.19816.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 750
[rebalance:debug,2014-08-19T16:49:47.354,ns_1@10.242.238.90:<0.19819.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1003
[rebalance:debug,2014-08-19T16:49:47.354,ns_1@10.242.238.90:<0.19816.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.354,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19816.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.355,ns_1@10.242.238.90:<0.19819.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.355,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19819.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.445,ns_1@10.242.238.90:<0.19828.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 752
[rebalance:debug,2014-08-19T16:49:47.445,ns_1@10.242.238.90:<0.19831.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1005
[rebalance:debug,2014-08-19T16:49:47.446,ns_1@10.242.238.90:<0.19828.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.446,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19828.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.447,ns_1@10.242.238.90:<0.19831.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.447,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19831.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.564,ns_1@10.242.238.90:<0.19834.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 754
[rebalance:debug,2014-08-19T16:49:47.564,ns_1@10.242.238.90:<0.19837.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1007
[rebalance:debug,2014-08-19T16:49:47.565,ns_1@10.242.238.90:<0.19834.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.565,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19834.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.565,ns_1@10.242.238.90:<0.19837.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.565,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19837.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.681,ns_1@10.242.238.90:<0.19840.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1009
[rebalance:debug,2014-08-19T16:49:47.681,ns_1@10.242.238.90:<0.19843.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 756
[rebalance:debug,2014-08-19T16:49:47.682,ns_1@10.242.238.90:<0.19843.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.682,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19843.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.682,ns_1@10.242.238.90:<0.19840.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.682,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19840.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.793,ns_1@10.242.238.90:<0.19847.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 758
[rebalance:debug,2014-08-19T16:49:47.793,ns_1@10.242.238.90:<0.19850.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1011
[rebalance:debug,2014-08-19T16:49:47.794,ns_1@10.242.238.90:<0.19847.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.794,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19847.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.794,ns_1@10.242.238.90:<0.19850.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.794,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19850.0> (ok)
[ns_server:debug,2014-08-19T16:49:47.889,ns_1@10.242.238.90:<0.19854.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 760)
[ns_server:debug,2014-08-19T16:49:47.889,ns_1@10.242.238.90:<0.19854.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:47.889,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19853.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.890,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:47.890,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:47.890,ns_1@10.242.238.90:<0.19855.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:47.890,ns_1@10.242.238.90:<0.19855.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:47.890,ns_1@10.242.238.90:<0.19105.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:49:47.891,ns_1@10.242.238.90:<0.19856.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1013
[rebalance:debug,2014-08-19T16:49:47.892,ns_1@10.242.238.90:<0.19856.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.893,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19856.0> (ok)
[ns_server:debug,2014-08-19T16:49:47.940,ns_1@10.242.238.90:<0.19860.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 762)
[ns_server:debug,2014-08-19T16:49:47.940,ns_1@10.242.238.90:<0.19860.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:47.940,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19859.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.940,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:47.940,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:47.941,ns_1@10.242.238.90:<0.19861.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:47.941,ns_1@10.242.238.90:<0.19861.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:47.941,ns_1@10.242.238.90:<0.19040.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:49:47.942,ns_1@10.242.238.90:<0.19862.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1015
[rebalance:debug,2014-08-19T16:49:47.943,ns_1@10.242.238.90:<0.19862.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:47.943,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19862.0> (ok)
[ns_server:info,2014-08-19T16:49:47.946,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 760 state to active
[ns_server:debug,2014-08-19T16:49:47.971,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:47.975,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3624 us
[ns_server:debug,2014-08-19T16:49:47.975,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.975,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:47.976,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{760,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:47.991,ns_1@10.242.238.90:<0.19867.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 764)
[ns_server:debug,2014-08-19T16:49:47.991,ns_1@10.242.238.90:<0.19867.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:47.991,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19866.0> (ok)
[rebalance:debug,2014-08-19T16:49:47.991,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:47.992,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:47.992,ns_1@10.242.238.90:<0.19868.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:47.992,ns_1@10.242.238.90:<0.19868.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:49:47.992,ns_1@10.242.238.90:<0.19869.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1017
[rebalance:info,2014-08-19T16:49:47.992,ns_1@10.242.238.90:<0.18956.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:49:47.996,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 762 state to active
[ns_server:debug,2014-08-19T16:49:48.014,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[views:debug,2014-08-19T16:49:48.014,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/760. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:48.014,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",760,active,1}
[rebalance:debug,2014-08-19T16:49:48.015,ns_1@10.242.238.90:<0.19869.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.015,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19869.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.017,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.017,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3226 us
[ns_server:debug,2014-08-19T16:49:48.017,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.018,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{762,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:48.032,ns_1@10.242.238.90:<0.19874.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1019
[rebalance:debug,2014-08-19T16:49:48.032,ns_1@10.242.238.90:<0.19873.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1002
[ns_server:info,2014-08-19T16:49:48.049,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 764 state to active
[ns_server:debug,2014-08-19T16:49:48.071,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.075,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.075,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3871 us
[ns_server:debug,2014-08-19T16:49:48.075,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.076,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{764,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:48.097,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/762. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:48.098,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",762,active,1}
[rebalance:debug,2014-08-19T16:49:48.098,ns_1@10.242.238.90:<0.19873.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.098,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19873.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.154,ns_1@10.242.238.90:<0.19880.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1021
[rebalance:debug,2014-08-19T16:49:48.154,ns_1@10.242.238.90:<0.19883.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1004
[views:debug,2014-08-19T16:49:48.172,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/764. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:48.172,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",764,active,1}
[rebalance:debug,2014-08-19T16:49:48.173,ns_1@10.242.238.90:<0.19874.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:48.173,ns_1@10.242.238.90:<0.19883.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:48.173,ns_1@10.242.238.90:<0.19880.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.173,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19874.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.173,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19883.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.173,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19880.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.279,ns_1@10.242.238.90:<0.19886.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1006
[rebalance:debug,2014-08-19T16:49:48.280,ns_1@10.242.238.90:<0.19886.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.280,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19886.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.328,ns_1@10.242.238.90:<0.19889.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1008
[rebalance:debug,2014-08-19T16:49:48.330,ns_1@10.242.238.90:<0.19889.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.330,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19889.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.357,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.360,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.360,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3305 us
[ns_server:debug,2014-08-19T16:49:48.361,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{491,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.362,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:49:48.381,ns_1@10.242.238.90:<0.19901.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1010
[rebalance:debug,2014-08-19T16:49:48.382,ns_1@10.242.238.90:<0.19901.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.382,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19901.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.403,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.408,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.409,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1883 us
[ns_server:debug,2014-08-19T16:49:48.409,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{493,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.409,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:49:48.437,ns_1@10.242.238.90:<0.19908.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1012
[rebalance:debug,2014-08-19T16:49:48.438,ns_1@10.242.238.90:<0.19908.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.438,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19908.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.454,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.461,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.461,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 5097 us
[ns_server:debug,2014-08-19T16:49:48.461,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.462,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{495,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:48.470,ns_1@10.242.238.90:<0.19912.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1014
[rebalance:debug,2014-08-19T16:49:48.472,ns_1@10.242.238.90:<0.19912.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:48.472,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19912.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.514,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.517,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.518,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4097 us
[ns_server:debug,2014-08-19T16:49:48.518,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{497,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.519,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.529,ns_1@10.242.238.90:<0.19917.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 747)
[ns_server:debug,2014-08-19T16:49:48.529,ns_1@10.242.238.90:<0.19917.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.529,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19916.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.530,ns_1@10.242.238.90:<0.19919.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 751)
[ns_server:debug,2014-08-19T16:49:48.530,ns_1@10.242.238.90:<0.19919.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.530,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19918.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19922.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 763)
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19922.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19927.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19920.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19928.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 749)
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19929.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19927.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.531,ns_1@10.242.238.90:<0.19929.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19928.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19932.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 753)
[rebalance:info,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19483.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19921.0> (ok)
[rebalance:info,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19659.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19932.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19937.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 757)
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19923.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19937.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19940.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 755)
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19924.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19941.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 759)
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19940.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19941.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19942.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 765)
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:<0.19942.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19944.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 748)
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19943.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.532,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19926.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19944.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19945.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19925.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19943.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19946.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 752)
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19945.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19930.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19946.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19947.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 754)
[rebalance:info,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19001.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19931.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:info,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19561.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19947.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19949.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 761)
[rebalance:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19948.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19933.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19949.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19948.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19950.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:<0.19951.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 750)
[rebalance:info,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19419.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.533,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19935.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19951.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19950.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19952.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19934.0> (ok)
[rebalance:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19953.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 756)
[rebalance:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19952.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19954.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19246.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19144.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19936.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19953.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19954.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19955.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 758)
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19938.0> (ok)
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19956.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19957.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19955.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19956.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19957.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:info,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19614.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.534,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.19939.0> (ok)
[rebalance:info,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.19320.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:49:48.534,ns_1@10.242.238.90:<0.18911.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19960.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19958.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19959.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19960.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19958.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19959.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:info,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19373.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19444.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19066.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19961.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19962.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19961.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19962.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19963.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:49:48.535,ns_1@10.242.238.90:<0.19271.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.536,ns_1@10.242.238.90:<0.19963.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:48.536,ns_1@10.242.238.90:<0.19536.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:49:48.536,ns_1@10.242.238.90:<0.19206.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.646,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.650,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.650,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3688 us
[ns_server:debug,2014-08-19T16:49:48.650,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.651,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{499,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.680,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.684,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.685,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4219 us
[ns_server:debug,2014-08-19T16:49:48.686,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{503,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.686,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.704,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.709,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2908 us
[ns_server:debug,2014-08-19T16:49:48.709,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.710,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:49:48.710,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 751 state to active
[ns_server:debug,2014-08-19T16:49:48.710,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{501,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.734,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.737,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.737,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3246 us
[ns_server:debug,2014-08-19T16:49:48.738,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.739,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{509,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:48.748,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/751. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:48.748,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",751,active,1}
[ns_server:debug,2014-08-19T16:49:48.756,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.764,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 8084 us
[ns_server:debug,2014-08-19T16:49:48.764,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.765,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.765,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{494,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.790,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.794,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3691 us
[ns_server:debug,2014-08-19T16:49:48.794,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.794,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.795,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{751,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.808,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.810,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2570 us
[ns_server:info,2014-08-19T16:49:48.811,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 750 state to active
[ns_server:debug,2014-08-19T16:49:48.811,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.812,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{505,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.812,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:49:48.823,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 753 state to active
[ns_server:debug,2014-08-19T16:49:48.826,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.830,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.831,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 5159 us
[ns_server:debug,2014-08-19T16:49:48.832,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{498,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:48.834,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:49:48.841,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.841,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.841,ns_1@10.242.238.90:<0.19970.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.842,ns_1@10.242.238.90:<0.19970.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:48.842,ns_1@10.242.238.90:<0.19354.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.844,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.848,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3378 us
[ns_server:debug,2014-08-19T16:49:48.848,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.848,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.849,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{496,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:48.865,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/750. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:48.865,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",750,active,1}
[rebalance:debug,2014-08-19T16:49:48.866,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.866,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.866,ns_1@10.242.238.90:<0.19972.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.867,ns_1@10.242.238.90:<0.19972.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:48.867,ns_1@10.242.238.90:<0.18982.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.871,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.872,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.872,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1419 us
[ns_server:debug,2014-08-19T16:49:48.873,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.873,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{492,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:48.884,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.885,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.885,ns_1@10.242.238.90:<0.19973.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.885,ns_1@10.242.238.90:<0.19973.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:48.885,ns_1@10.242.238.90:<0.19227.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.891,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.897,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.897,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 5405 us
[ns_server:debug,2014-08-19T16:49:48.898,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.898,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{507,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:48.912,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 763 state to active
[views:debug,2014-08-19T16:49:48.915,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/753. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:48.915,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",753,active,1}
[ns_server:debug,2014-08-19T16:49:48.917,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.919,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.919,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1321 us
[ns_server:debug,2014-08-19T16:49:48.919,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.920,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{750,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:48.956,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.956,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.956,ns_1@10.242.238.90:<0.19976.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.956,ns_1@10.242.238.90:<0.19976.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:48.956,ns_1@10.242.238.90:<0.19517.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.959,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:48.962,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2836 us
[ns_server:debug,2014-08-19T16:49:48.962,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.963,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.963,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{753,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:48.967,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1010 state to replica
[ns_server:info,2014-08-19T16:49:48.967,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1010,1022,1023] ([1010], [])
[ns_server:debug,2014-08-19T16:49:48.968,ns_1@10.242.238.90:<0.19978.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1010,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.218935>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1010,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:48.969,ns_1@10.242.238.90:<0.19978.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.19776.0>
[ns_server:info,2014-08-19T16:49:48.969,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[rebalance:debug,2014-08-19T16:49:48.970,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.970,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.970,ns_1@10.242.238.90:<0.19980.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.970,ns_1@10.242.238.90:<0.19980.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:48.970,ns_1@10.242.238.90:<0.19430.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[views:debug,2014-08-19T16:49:48.976,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/763. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:48.976,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",763,active,1}
[ns_server:info,2014-08-19T16:49:48.978,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1010,1},{1022,1},{1023,1}]
[ns_server:info,2014-08-19T16:49:48.978,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19981.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19981.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:48.979,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.980,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:48.980,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:48.980,ns_1@10.242.238.90:<0.19776.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:48.980,ns_1@10.242.238.90:<0.19978.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.19776.0>
[ns_server:debug,2014-08-19T16:49:48.980,ns_1@10.242.238.90:<0.19978.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:48.980,ns_1@10.242.238.90:<0.19983.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:48.980,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.19776.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.19777.0>,<<"cut off">>,<<"cut off">>,[],10,false,false,0,
{1408,452588,979111},
completed,
{<0.19978.0>,#Ref<0.0.0.218949>},
<<"replication_ns_1@10.242.238.90">>,<0.19776.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:48.981,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.19978.0>,{#Ref<0.0.0.218937>,<0.19983.0>}}
[error_logger:info,2014-08-19T16:49:48.981,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.19983.0>},
{name,
{new_child_id,
[1010,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1010,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:48.984,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1010,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:48.984,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19984.0>
[rebalance:debug,2014-08-19T16:49:48.988,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:48.989,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:48.989,ns_1@10.242.238.90:<0.19985.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:48.989,ns_1@10.242.238.90:<0.19985.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:49:48.989,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:info,2014-08-19T16:49:48.989,ns_1@10.242.238.90:<0.19086.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:48.993,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.993,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4478 us
[ns_server:debug,2014-08-19T16:49:48.994,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:48.994,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1010,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:48.997,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1019 state to replica
[ns_server:info,2014-08-19T16:49:48.997,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1010,1019,1022,1023] ([1019], [])
[ns_server:debug,2014-08-19T16:49:49.001,ns_1@10.242.238.90:<0.19987.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1010,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.219123>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1010,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.001,ns_1@10.242.238.90:<0.19987.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.19983.0>
[ns_server:info,2014-08-19T16:49:49.001,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.009,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1010,1},{1019,1},{1022,1},{1023,1}]
[ns_server:info,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19989.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19989.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.010,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.011,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.011,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.011,ns_1@10.242.238.90:<0.19983.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.011,ns_1@10.242.238.90:<0.19987.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.19983.0>
[ns_server:debug,2014-08-19T16:49:49.011,ns_1@10.242.238.90:<0.19987.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.011,ns_1@10.242.238.90:<0.19991.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.011,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.19983.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.19984.0>,<<"cut off">>,<<"cut off">>,[],13,false,false,0,
{1408,452589,10260},
completed,
{<0.19987.0>,#Ref<0.0.0.219138>},
<<"replication_ns_1@10.242.238.90">>,<0.19983.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.012,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.19987.0>,{#Ref<0.0.0.219125>,<0.19991.0>}}
[error_logger:info,2014-08-19T16:49:49.012,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.19991.0>},
{name,
{new_child_id,
[1010,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1010,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:49.012,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 759 state to active
[ns_server:debug,2014-08-19T16:49:49.016,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.016,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1010,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.016,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.19992.0>
[ns_server:debug,2014-08-19T16:49:49.023,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 6861 us
[ns_server:debug,2014-08-19T16:49:49.026,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:49:49.026,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:info,2014-08-19T16:49:49.026,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1013 state to replica
[ns_server:debug,2014-08-19T16:49:49.027,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.027,ns_1@10.242.238.90:<0.19993.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:info,2014-08-19T16:49:49.027,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1010,1013,1019,1022,1023] ([1013], [])
[ns_server:debug,2014-08-19T16:49:49.027,ns_1@10.242.238.90:<0.19993.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.028,ns_1@10.242.238.90:<0.19125.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.028,ns_1@10.242.238.90:<0.19994.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1010,1013,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.219282>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1010,1013,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.028,ns_1@10.242.238.90:<0.19994.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.19991.0>
[ns_server:info,2014-08-19T16:49:49.028,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:debug,2014-08-19T16:49:49.028,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.029,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1019,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.037,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1010,1},{1013,1},{1019,1},{1022,1},{1023,1}]
[ns_server:info,2014-08-19T16:49:49.038,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.038,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.038,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.038,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19997.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19997.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19991.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.039,ns_1@10.242.238.90:<0.19994.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.19991.0>
[ns_server:debug,2014-08-19T16:49:49.040,ns_1@10.242.238.90:<0.19994.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.040,ns_1@10.242.238.90:<0.19999.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.040,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.19991.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.19992.0>,<<"cut off">>,<<"cut off">>,[],16,false,false,0,
{1408,452589,38601},
completed,
{<0.19994.0>,#Ref<0.0.0.219300>},
<<"replication_ns_1@10.242.238.90">>,<0.19991.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.040,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.19994.0>,{#Ref<0.0.0.219284>,<0.19999.0>}}
[error_logger:info,2014-08-19T16:49:49.040,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.19999.0>},
{name,
{new_child_id,
[1010,1013,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1010,1013,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:49.042,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 755 state to active
[ns_server:debug,2014-08-19T16:49:49.044,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1010,1013,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.044,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20000.0>
[ns_server:debug,2014-08-19T16:49:49.045,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.049,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.049,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3753 us
[ns_server:debug,2014-08-19T16:49:49.050,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.051,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1013,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:49.057,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/759. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.057,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",759,active,1}
[ns_server:info,2014-08-19T16:49:49.061,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 758 state to active
[ns_server:debug,2014-08-19T16:49:49.072,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.081,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.081,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 8607 us
[ns_server:info,2014-08-19T16:49:49.081,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 748 state to active
[ns_server:debug,2014-08-19T16:49:49.082,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{502,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.085,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.097,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:49:49.100,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.100,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.100,ns_1@10.242.238.90:<0.20002.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.100,ns_1@10.242.238.90:<0.20002.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.100,ns_1@10.242.238.90:<0.19556.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.101,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.101,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1680 us
[ns_server:debug,2014-08-19T16:49:49.102,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.103,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{763,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:49.107,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/755. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.107,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",755,active,1}
[rebalance:debug,2014-08-19T16:49:49.126,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.126,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.126,ns_1@10.242.238.90:<0.20004.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.127,ns_1@10.242.238.90:<0.20004.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.127,ns_1@10.242.238.90:<0.19609.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.129,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.133,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3374 us
[ns_server:debug,2014-08-19T16:49:49.133,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.133,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.134,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{500,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.136,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1006 state to replica
[ns_server:info,2014-08-19T16:49:49.136,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1006,1010,1013,1019,1022,1023] ([1006], [])
[ns_server:debug,2014-08-19T16:49:49.139,ns_1@10.242.238.90:<0.20005.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1006,1010,1013,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.219627>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1006,1010,1013,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.140,ns_1@10.242.238.90:<0.20005.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.19999.0>
[ns_server:info,2014-08-19T16:49:49.140,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[views:debug,2014-08-19T16:49:49.141,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/758. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.141,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",758,active,1}
[ns_server:info,2014-08-19T16:49:49.148,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1006,1},{1010,1},{1013,1},{1019,1},{1022,1},{1023,1}]
[ns_server:info,2014-08-19T16:49:49.148,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.148,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.20008.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.20008.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.149,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.150,ns_1@10.242.238.90:<0.19999.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.150,ns_1@10.242.238.90:<0.20005.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.19999.0>
[ns_server:debug,2014-08-19T16:49:49.150,ns_1@10.242.238.90:<0.20005.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.150,ns_1@10.242.238.90:<0.20010.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with the vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.150,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.19999.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20000.0>,<<"cut off">>,<<"cut off">>,[],19,false,false,0,
{1408,452589,148946},
completed,
{<0.20005.0>,#Ref<0.0.0.219641>},
<<"replication_ns_1@10.242.238.90">>,<0.19999.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.151,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20005.0>,{#Ref<0.0.0.219629>,<0.20010.0>}}
[error_logger:info,2014-08-19T16:49:49.151,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20010.0>},
{name,
{new_child_id,
[1006,1010,1013,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1006,1010,1013,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:49.151,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 749 state to active
[ns_server:debug,2014-08-19T16:49:49.155,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1006,1010,1013,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[ns_server:debug,2014-08-19T16:49:49.158,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.160,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.161,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1006,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:49.164,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:debug,2014-08-19T16:49:49.173,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20011.0>
[ns_server:debug,2014-08-19T16:49:49.173,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 14 us
[ns_server:debug,2014-08-19T16:49:49.173,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.173,ns_1@10.242.238.90:<0.20013.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.173,ns_1@10.242.238.90:<0.20013.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.175,ns_1@10.242.238.90:<0.19060.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.175,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:49:49.176,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 757 state to active
[ns_server:info,2014-08-19T16:49:49.178,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1008 state to replica
[ns_server:info,2014-08-19T16:49:49.179,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1006,1008,1010,1013,1019,1022,1023] ([1008], [])
[ns_server:debug,2014-08-19T16:49:49.179,ns_1@10.242.238.90:<0.20014.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1006,1008,1010,1013,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.219836>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1006,1008,1010,1013,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.180,ns_1@10.242.238.90:<0.20014.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20010.0>
[ns_server:info,2014-08-19T16:49:49.180,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.183,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1006,1},{1008,1},{1010,1},{1013,1},{1019,1},{1022,1},{1023,1}]
[ns_server:info,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20016.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.184,ns_1@10.242.238.90:<0.20016.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20010.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20014.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20010.0>
[ns_server:debug,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20014.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20018.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with the vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.185,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20010.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20011.0>,<<"cut off">>,<<"cut off">>,[],22,false,false,0,
{1408,452589,184229},
completed,
{<0.20014.0>,#Ref<0.0.0.219849>},
<<"replication_ns_1@10.242.238.90">>,<0.20010.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.186,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20014.0>,{#Ref<0.0.0.219838>,<0.20018.0>}}
[error_logger:info,2014-08-19T16:49:49.186,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20018.0>},
{name,
{new_child_id,
[1006,1008,1010,1013,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1006,1008,1010,1013,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.189,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1006,1008,1010,1013,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.189,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20019.0>
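The tap_replication_manager entries above show the replication vbucket filter toward 'ns_1@10.242.238.91' growing by one vbucket per move: [1006,1010,1013,1019,1022,1023] at 16:49:49.136, then [1006,1008,1010,1013,1019,1022,1023] at 16:49:49.179, where the first parenthesized list appears to be the vbuckets being added. A minimal Erlang sketch of that accumulation, under that assumption; vb_filter_sketch and extend/2 are illustrative names, not ns_server code:

%% vb_filter_sketch.erl -- illustrative sketch only, not part of ns_server.
-module(vb_filter_sketch).
-export([extend/2]).

%% OldVBuckets: the current sorted filter; Added: the first parenthesized
%% list from the "Going to change replication ... to have" log entry.
%% Returns the new sorted, deduplicated filter.
extend(OldVBuckets, Added) ->
    lists:usort(OldVBuckets ++ Added).

For example, vb_filter_sketch:extend([1006,1010,1013,1019,1022,1023], [1008]) returns [1006,1008,1010,1013,1019,1022,1023], which matches the list logged at 16:49:49.179.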
[views:debug,2014-08-19T16:49:49.192,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/748. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.192,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",748,active,1}
[ns_server:debug,2014-08-19T16:49:49.193,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.195,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2481 us
[ns_server:debug,2014-08-19T16:49:49.195,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.196,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.196,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1008,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.203,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1016 state to replica
[ns_server:info,2014-08-19T16:49:49.203,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1006,1008,1010,1013,1016,1019,1022,1023] ([1016], [])
[rebalance:debug,2014-08-19T16:49:49.204,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.204,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.204,ns_1@10.242.238.90:<0.20020.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.205,ns_1@10.242.238.90:<0.20020.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.205,ns_1@10.242.238.90:<0.19464.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.206,ns_1@10.242.238.90:<0.20021.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1006,1008,1010,1013,1016,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.220009>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1006,1008,1010,1013,1016,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.207,ns_1@10.242.238.90:<0.20021.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20018.0>
[ns_server:info,2014-08-19T16:49:49.207,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.211,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1006,1},{1008,1},{1010,1},{1013,1},{1016,1},{1019,1},{1022,1},{1023,1}]
[ns_server:info,2014-08-19T16:49:49.211,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.211,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.211,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.211,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20024.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20024.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20018.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.212,ns_1@10.242.238.90:<0.20021.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20018.0>
[ns_server:debug,2014-08-19T16:49:49.213,ns_1@10.242.238.90:<0.20021.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.213,ns_1@10.242.238.90:<0.20026.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with the vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.213,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20018.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20019.0>,<<"cut off">>,<<"cut off">>,[],25,false,false,0,
{1408,452589,211688},
completed,
{<0.20021.0>,#Ref<0.0.0.220023>},
<<"replication_ns_1@10.242.238.90">>,<0.20018.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.214,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20021.0>,{#Ref<0.0.0.220011>,<0.20026.0>}}
[error_logger:info,2014-08-19T16:49:49.214,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20026.0>},
{name,
{new_child_id,
[1006,1008,1010,1013,1016,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1006,1008,1010,1013,1016,1019,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.217,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1006,1008,1010,1013,1016,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.218,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20027.0>
[ns_server:debug,2014-08-19T16:49:49.220,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.221,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.222,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1252 us
[ns_server:debug,2014-08-19T16:49:49.222,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:49:49.222,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 754 state to active
[ns_server:debug,2014-08-19T16:49:49.223,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1016,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:49.226,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/757. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.226,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",757,active,1}
[ns_server:info,2014-08-19T16:49:49.236,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 765 state to active
[ns_server:debug,2014-08-19T16:49:49.240,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.248,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7788 us
[ns_server:debug,2014-08-19T16:49:49.248,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.249,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.250,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{759,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
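Each "config change: buckets" entry in this section is a plain Erlang term; under {map, ...} a moved vbucket appears to be paired with two replication chains (for vbucket 759 above, ['ns_1@10.242.238.88',undefined] and ['ns_1@10.242.238.90','ns_1@10.242.238.91']). A minimal sketch for pulling those tuples out of such a term, assuming that reading of the map entries; bucket_diff_sketch and map_changes/1 are illustrative names, not ns_server code:

%% bucket_diff_sketch.erl -- illustrative sketch only, not part of ns_server.
-module(bucket_diff_sketch).
-export([map_changes/1]).

%% BucketsTerm: the term printed after "buckets ->" in the log.
%% Returns one {BucketName, VBucket, ChainA, ChainB} tuple per map entry.
map_changes(BucketsTerm) ->
    Configs = proplists:get_value(configs, BucketsTerm, []),
    lists:append(
      [ [{Bucket, VB, ChainA, ChainB}
         || {VB, ChainA, ChainB} <- proplists:get_value(map, Props, [])]
        || {Bucket, Props} <- Configs ]).

Applied to the term above, it yields [{"default",759,['ns_1@10.242.238.88',undefined],['ns_1@10.242.238.90','ns_1@10.242.238.91']}].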
[ns_server:info,2014-08-19T16:49:49.253,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1015 state to replica
[ns_server:info,2014-08-19T16:49:49.253,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1006,1008,1010,1013,1015,1016,1019,1022,1023] ([1015], [])
[ns_server:debug,2014-08-19T16:49:49.254,ns_1@10.242.238.90:<0.20029.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1006,1008,1010,1013,1015,1016,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.220220>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1006,1008,1010,1013,1015,1016,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.254,ns_1@10.242.238.90:<0.20029.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20026.0>
[ns_server:info,2014-08-19T16:49:49.255,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.263,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1006,1},
{1008,1},
{1010,1},
{1013,1},
{1015,1},
{1016,1},
{1019,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.263,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.263,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20031.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20031.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.264,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.265,ns_1@10.242.238.90:<0.20026.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.265,ns_1@10.242.238.90:<0.20029.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20026.0>
[ns_server:debug,2014-08-19T16:49:49.265,ns_1@10.242.238.90:<0.20029.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.265,ns_1@10.242.238.90:<0.20033.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with the vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.265,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20026.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20027.0>,<<"cut off">>,<<"cut off">>,[],28,false,false,0,
{1408,452589,263939},
completed,
{<0.20029.0>,#Ref<0.0.0.220233>},
<<"replication_ns_1@10.242.238.90">>,<0.20026.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.265,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20029.0>,{#Ref<0.0.0.220222>,<0.20033.0>}}
[error_logger:info,2014-08-19T16:49:49.265,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20033.0>},
{name,
{new_child_id,
[1006,1008,1010,1013,1015,1016,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1006,1008,1010,1013,1015,1016,1019,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:49:49.266,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 747 state to active
[ns_server:debug,2014-08-19T16:49:49.269,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1006,1008,1010,1013,1015,1016,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.269,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20035.0>
[ns_server:info,2014-08-19T16:49:49.270,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 752 state to active
[ns_server:debug,2014-08-19T16:49:49.273,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[views:debug,2014-08-19T16:49:49.273,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/749. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.274,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",749,active,1}
[ns_server:debug,2014-08-19T16:49:49.276,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.276,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3465 us
[ns_server:debug,2014-08-19T16:49:49.277,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1015,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.278,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:49:49.292,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.293,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.293,ns_1@10.242.238.90:<0.20036.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.293,ns_1@10.242.238.90:<0.20036.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.293,ns_1@10.242.238.90:<0.19035.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.299,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.301,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.301,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1599 us
[ns_server:debug,2014-08-19T16:49:49.301,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.302,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{755,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:49.303,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.303,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.303,ns_1@10.242.238.90:<0.20038.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.303,ns_1@10.242.238.90:<0.20038.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.303,ns_1@10.242.238.90:<0.19187.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.316,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:49:49.319,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 761 state to active
[ns_server:debug,2014-08-19T16:49:49.320,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1926 us
[ns_server:debug,2014-08-19T16:49:49.320,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.321,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.321,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{758,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:49:49.323,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/765. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.323,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",765,active,1}
[ns_server:debug,2014-08-19T16:49:49.339,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:49:49.342,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.343,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4282 us
[ns_server:debug,2014-08-19T16:49:49.343,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.344,ns_1@10.242.238.90:<0.20040.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.344,ns_1@10.242.238.90:<0.20040.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.347,ns_1@10.242.238.90:<0.18906.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.347,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:49:49.347,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1005 state to replica
[ns_server:info,2014-08-19T16:49:49.347,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1005,1006,1008,1010,1013,1015,1016,1019,1022,1023] ([1005], [])
[ns_server:debug,2014-08-19T16:49:49.347,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.349,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{748,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.349,ns_1@10.242.238.90:<0.20041.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1005,1006,1008,1010,1013,1015,1016,1019,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.220562>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1005,1006,1008,1010,1013,1015,1016,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.349,ns_1@10.242.238.90:<0.20041.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20033.0>
[ns_server:info,2014-08-19T16:49:49.350,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[rebalance:debug,2014-08-19T16:49:49.356,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.356,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.357,ns_1@10.242.238.90:<0.20043.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.357,ns_1@10.242.238.90:<0.20043.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.357,ns_1@10.242.238.90:<0.19393.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:49:49.358,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1005,1},
{1006,1},
{1008,1},
{1010,1},
{1013,1},
{1015,1},
{1016,1},
{1019,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.358,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20045.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20045.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.359,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.360,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.360,ns_1@10.242.238.90:<0.20033.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.360,ns_1@10.242.238.90:<0.20041.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20033.0>
[ns_server:debug,2014-08-19T16:49:49.360,ns_1@10.242.238.90:<0.20041.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.360,ns_1@10.242.238.90:<0.20047.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with the vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.360,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20033.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20035.0>,<<"cut off">>,<<"cut off">>,[],31,false,false,0,
{1408,452589,359125},
completed,
{<0.20041.0>,#Ref<0.0.0.220577>},
<<"replication_ns_1@10.242.238.90">>,<0.20033.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[error_logger:info,2014-08-19T16:49:49.360,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20047.0>},
{name,
{new_child_id,
[1005,1006,1008,1010,1013,1015,1016,1019,1022,
1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1005,1006,1008,1010,1013,1015,1016,1019,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.361,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20041.0>,{#Ref<0.0.0.220564>,<0.20047.0>}}
[ns_server:debug,2014-08-19T16:49:49.364,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1005,1006,1008,1010,1013,1015,1016,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.364,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20048.0>
[ns_server:debug,2014-08-19T16:49:49.369,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.370,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1105 us
[ns_server:debug,2014-08-19T16:49:49.370,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.371,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.371,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1005,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.373,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1004 state to replica
[ns_server:info,2014-08-19T16:49:49.374,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1004,1005,1006,1008,1010,1013,1015,1016,1019,1022,1023] ([1004], [])
[rebalance:debug,2014-08-19T16:49:49.374,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.374,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.374,ns_1@10.242.238.90:<0.20055.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.374,ns_1@10.242.238.90:<0.20055.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.375,ns_1@10.242.238.90:<0.19654.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.374,ns_1@10.242.238.90:<0.20056.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1004,1005,1006,1008,1010,1013,1015,1016,1019,
1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.220722>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1004,1005,1006,1008,1010,1013,1015,1016,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.375,ns_1@10.242.238.90:<0.20056.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20047.0>
[ns_server:info,2014-08-19T16:49:49.375,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.379,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1004,1},
{1005,1},
{1006,1},
{1008,1},
{1010,1},
{1013,1},
{1015,1},
{1016,1},
{1019,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.379,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.379,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.379,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.379,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20058.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20058.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20047.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.380,ns_1@10.242.238.90:<0.20056.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20047.0>
[ns_server:debug,2014-08-19T16:49:49.381,ns_1@10.242.238.90:<0.20056.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.381,ns_1@10.242.238.90:<0.20060.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with the vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.381,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20047.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20048.0>,<<"cut off">>,<<"cut off">>,[],34,false,false,0,
{1408,452589,379614},
completed,
{<0.20056.0>,#Ref<0.0.0.220745>},
<<"replication_ns_1@10.242.238.90">>,<0.20047.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.381,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20056.0>,{#Ref<0.0.0.220724>,<0.20060.0>}}
[error_logger:info,2014-08-19T16:49:49.381,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20060.0>},
{name,
{new_child_id,
[1004,1005,1006,1008,1010,1013,1015,1016,1019,
1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1004,1005,1006,1008,1010,1013,1015,1016,
1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.385,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1004,1005,1006,1008,1010,1013,1015,1016,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.385,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20061.0>
[ns_server:debug,2014-08-19T16:49:49.391,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:49:49.395,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.395,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.396,ns_1@10.242.238.90:<0.20063.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.396,ns_1@10.242.238.90:<0.20063.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.396,ns_1@10.242.238.90:<0.19266.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.397,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 5947 us
[ns_server:debug,2014-08-19T16:49:49.397,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.397,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.398,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1004,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.401,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1017 state to replica
[ns_server:info,2014-08-19T16:49:49.401,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1004,1005,1006,1008,1010,1013,1015,1016,1017,1019,1022,1023] ([1017], [])
[rebalance:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.20064.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1004,1005,1006,1008,1010,1013,1015,1016,1017,
1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.220888>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1004,1005,1006,1008,1010,1013,1015,1016,1017,1019,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.20064.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20060.0>
[ns_server:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:info,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.20066.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.20066.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[views:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/747. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",747,active,1}
[rebalance:info,2014-08-19T16:49:49.408,ns_1@10.242.238.90:<0.18931.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:49:49.419,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1004,1},
{1005,1},
{1006,1},
{1008,1},
{1010,1},
{1013,1},
{1015,1},
{1016,1},
{1017,1},
{1019,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.420,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.420,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.420,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.421,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.421,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.421,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.421,ns_1@10.242.238.90:<0.20067.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.421,ns_1@10.242.238.90:<0.20067.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.421,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.421,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.422,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.422,ns_1@10.242.238.90:<0.20060.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.422,ns_1@10.242.238.90:<0.20064.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20060.0>
[ns_server:debug,2014-08-19T16:49:49.422,ns_1@10.242.238.90:<0.20064.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.422,ns_1@10.242.238.90:<0.20069.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.422,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20060.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20061.0>,<<"cut off">>,<<"cut off">>,[],37,false,false,0,
{1408,452589,420766},
completed,
{<0.20064.0>,#Ref<0.0.0.220921>},
<<"replication_ns_1@10.242.238.90">>,<0.20060.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.423,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20064.0>,{#Ref<0.0.0.220895>,<0.20069.0>}}
[error_logger:info,2014-08-19T16:49:49.423,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20069.0>},
{name,
{new_child_id,
[1004,1005,1006,1008,1010,1013,1015,1016,1017,
1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1004,1005,1006,1008,1010,1013,1015,1016,
1017,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.426,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1004,1005,1006,1008,1010,1013,1015,1016,1017,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.427,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20070.0>
[rebalance:debug,2014-08-19T16:49:49.427,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.427,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.427,ns_1@10.242.238.90:<0.20072.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.427,ns_1@10.242.238.90:<0.20072.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.427,ns_1@10.242.238.90:<0.19301.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.429,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.432,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.432,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3523 us
[ns_server:debug,2014-08-19T16:49:49.433,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.434,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1017,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:49:49.448,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:49:49.448,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.448,ns_1@10.242.238.90:<0.20073.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.448,ns_1@10.242.238.90:<0.20073.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.449,ns_1@10.242.238.90:<0.19679.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.454,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.457,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.457,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2546 us
[ns_server:debug,2014-08-19T16:49:49.457,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.458,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{749,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.460,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 756 state to active
[ns_server:debug,2014-08-19T16:49:49.473,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[views:debug,2014-08-19T16:49:49.474,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/754. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.474,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",754,active,1}
[ns_server:debug,2014-08-19T16:49:49.479,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.479,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 5374 us
[ns_server:debug,2014-08-19T16:49:49.479,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:49:49.482,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1007 state to replica
[ns_server:info,2014-08-19T16:49:49.482,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1004,1005,1006,1007,1008,1010,1013,1015,1016,1017,1019,1022,1023] ([1007], [])
[ns_server:debug,2014-08-19T16:49:49.480,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{757,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.484,ns_1@10.242.238.90:<0.20075.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1015,1016,
1017,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.221183>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1015,1016,1017,1019,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.484,ns_1@10.242.238.90:<0.20075.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20069.0>
[ns_server:info,2014-08-19T16:49:49.484,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.497,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1010,1},
{1013,1},
{1015,1},
{1016,1},
{1017,1},
{1019,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.497,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.498,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.498,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.498,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.498,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20078.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20078.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20069.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.499,ns_1@10.242.238.90:<0.20075.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20069.0>
[ns_server:debug,2014-08-19T16:49:49.500,ns_1@10.242.238.90:<0.20075.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.500,ns_1@10.242.238.90:<0.20080.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.500,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20069.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20070.0>,<<"cut off">>,<<"cut off">>,[],40,false,false,0,
{1408,452589,498578},
completed,
{<0.20075.0>,#Ref<0.0.0.221198>},
<<"replication_ns_1@10.242.238.90">>,<0.20069.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.500,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20075.0>,{#Ref<0.0.0.221185>,<0.20080.0>}}
[error_logger:info,2014-08-19T16:49:49.500,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20080.0>},
{name,
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1015,1016,
1017,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1004,1005,1006,1007,1008,1010,1013,1015,
1016,1017,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.503,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1015,1016,1017,1019,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.504,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20081.0>
[ns_server:debug,2014-08-19T16:49:49.506,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.509,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3037 us
[ns_server:debug,2014-08-19T16:49:49.509,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.510,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1007,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.511,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.528,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.531,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.532,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3459 us
[ns_server:debug,2014-08-19T16:49:49.532,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[views:debug,2014-08-19T16:49:49.532,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/752. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.533,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",752,active,1}
[ns_server:debug,2014-08-19T16:49:49.533,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{754,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.581,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[views:debug,2014-08-19T16:49:49.583,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/761. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.583,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",761,active,1}
[ns_server:debug,2014-08-19T16:49:49.588,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7370 us
[ns_server:debug,2014-08-19T16:49:49.588,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.589,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.590,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{765,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.613,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.614,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1402 us
[ns_server:debug,2014-08-19T16:49:49.614,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.615,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.616,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{747,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.638,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.641,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.641,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3154 us
[ns_server:debug,2014-08-19T16:49:49.642,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[views:debug,2014-08-19T16:49:49.642,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/756. Updated state: active (1)
[ns_server:debug,2014-08-19T16:49:49.642,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",756,active,1}
[ns_server:debug,2014-08-19T16:49:49.642,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{752,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.647,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1018 state to replica
[ns_server:info,2014-08-19T16:49:49.648,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1004,1005,1006,1007,1008,1010,1013,1015,1016,1017,1018,1019,1022,1023] ([1018], [])
[ns_server:debug,2014-08-19T16:49:49.650,ns_1@10.242.238.90:<0.20086.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1015,1016,
1017,1018,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.221506>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1015,1016,1017,1018,1019,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.651,ns_1@10.242.238.90:<0.20086.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20080.0>
[ns_server:info,2014-08-19T16:49:49.651,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.658,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1010,1},
{1013,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.659,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.659,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.659,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.659,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.659,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20088.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20088.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20080.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.660,ns_1@10.242.238.90:<0.20086.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20080.0>
[ns_server:debug,2014-08-19T16:49:49.661,ns_1@10.242.238.90:<0.20086.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.661,ns_1@10.242.238.90:<0.20090.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.661,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20080.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20081.0>,<<"cut off">>,<<"cut off">>,[],43,false,false,0,
{1408,452589,659672},
completed,
{<0.20086.0>,#Ref<0.0.0.221519>},
<<"replication_ns_1@10.242.238.90">>,<0.20080.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.661,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20086.0>,{#Ref<0.0.0.221508>,<0.20090.0>}}
[error_logger:info,2014-08-19T16:49:49.661,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20090.0>},
{name,
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1015,1016,
1017,1018,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1004,1005,1006,1007,1008,1010,1013,1015,
1016,1017,1018,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.665,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1015,1016,1017,1018,1019,1022,
1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.665,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20092.0>
[ns_server:debug,2014-08-19T16:49:49.668,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.671,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.671,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2628 us
[ns_server:debug,2014-08-19T16:49:49.671,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.672,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1018,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.674,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1014 state to replica
[ns_server:info,2014-08-19T16:49:49.674,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1004,1005,1006,1007,1008,1010,1013,1014,1015,1016,1017,1018,1019,1022,1023] ([1014], [])
[ns_server:debug,2014-08-19T16:49:49.675,ns_1@10.242.238.90:<0.20093.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1014,1015,
1016,1017,1018,1019,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.221642>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1014,1015,1016,1017,1018,
1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.675,ns_1@10.242.238.90:<0.20093.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20090.0>
[ns_server:info,2014-08-19T16:49:49.675,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.679,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1010,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.679,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.679,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20095.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20095.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.680,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.681,ns_1@10.242.238.90:<0.20090.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.681,ns_1@10.242.238.90:<0.20093.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20090.0>
[ns_server:debug,2014-08-19T16:49:49.681,ns_1@10.242.238.90:<0.20093.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.681,ns_1@10.242.238.90:<0.20097.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.681,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20090.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20092.0>,<<"cut off">>,<<"cut off">>,[],46,false,false,0,
{1408,452589,679924},
completed,
{<0.20093.0>,#Ref<0.0.0.221655>},
<<"replication_ns_1@10.242.238.90">>,<0.20090.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.681,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20093.0>,{#Ref<0.0.0.221644>,<0.20097.0>}}
[error_logger:info,2014-08-19T16:49:49.681,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20097.0>},
{name,
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1014,1015,
1016,1017,1018,1019,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1004,1005,1006,1007,1008,1010,1013,1014,
1015,1016,1017,1018,1019,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.685,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1014,1015,1016,1017,1018,1019,
1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.685,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20098.0>
[ns_server:debug,2014-08-19T16:49:49.686,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.689,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2657 us
[ns_server:debug,2014-08-19T16:49:49.689,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.690,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.690,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1014,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.712,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.715,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.715,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3049 us
[ns_server:debug,2014-08-19T16:49:49.716,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.716,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{761,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.718,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1021 state to replica
[ns_server:info,2014-08-19T16:49:49.719,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1004,1005,1006,1007,1008,1010,1013,1014,1015,1016,1017,1018,1019,1021,1022,
1023] ([1021], [])
[ns_server:debug,2014-08-19T16:49:49.721,ns_1@10.242.238.90:<0.20100.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1014,1015,
1016,1017,1018,1019,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.221806>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1014,1015,1016,1017,1018,
1019,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.721,ns_1@10.242.238.90:<0.20100.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20097.0>
[ns_server:info,2014-08-19T16:49:49.721,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.730,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1010,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.730,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20103.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20103.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.731,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.732,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.732,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.732,ns_1@10.242.238.90:<0.20097.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.732,ns_1@10.242.238.90:<0.20100.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20097.0>
[ns_server:debug,2014-08-19T16:49:49.732,ns_1@10.242.238.90:<0.20100.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.732,ns_1@10.242.238.90:<0.20105.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.733,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20097.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20098.0>,<<"cut off">>,<<"cut off">>,[],49,false,false,0,
{1408,452589,730983},
completed,
{<0.20100.0>,#Ref<0.0.0.221819>},
<<"replication_ns_1@10.242.238.90">>,<0.20097.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.733,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20100.0>,{#Ref<0.0.0.221808>,<0.20105.0>}}
[error_logger:info,2014-08-19T16:49:49.733,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20105.0>},
{name,
{new_child_id,
[1004,1005,1006,1007,1008,1010,1013,1014,1015,
1016,1017,1018,1019,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1004,1005,1006,1007,1008,1010,1013,1014,
1015,1016,1017,1018,1019,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.736,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1004,1005,1006,1007,1008,1010,1013,1014,1015,1016,1017,1018,1019,
1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.737,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20106.0>
[ns_server:debug,2014-08-19T16:49:49.737,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.745,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7952 us
[ns_server:debug,2014-08-19T16:49:49.745,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.746,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.747,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1021,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.748,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1009 state to replica
[ns_server:info,2014-08-19T16:49:49.749,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1004,1005,1006,1007,1008,1009,1010,1013,1014,1015,1016,1017,1018,1019,1021,
1022,1023] ([1009], [])
[ns_server:debug,2014-08-19T16:49:49.750,ns_1@10.242.238.90:<0.20107.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1004,1005,1006,1007,1008,1009,1010,1013,1014,
1015,1016,1017,1018,1019,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.221942>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1004,1005,1006,1007,1008,1009,1010,1013,1014,1015,1016,1017,
1018,1019,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.750,ns_1@10.242.238.90:<0.20107.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20105.0>
[ns_server:info,2014-08-19T16:49:49.750,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.754,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.754,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.755,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.755,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.755,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.755,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.755,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.755,ns_1@10.242.238.90:<0.20109.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.755,ns_1@10.242.238.90:<0.20109.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20105.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20107.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20105.0>
[ns_server:debug,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20107.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20112.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.756,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20105.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20106.0>,<<"cut off">>,<<"cut off">>,[],52,false,false,0,
{1408,452589,755260},
completed,
{<0.20107.0>,#Ref<0.0.0.221955>},
<<"replication_ns_1@10.242.238.90">>,<0.20105.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.757,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20107.0>,{#Ref<0.0.0.221944>,<0.20112.0>}}
[error_logger:info,2014-08-19T16:49:49.757,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20112.0>},
{name,
{new_child_id,
[1004,1005,1006,1007,1008,1009,1010,1013,1014,
1015,1016,1017,1018,1019,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1004,1005,1006,1007,1008,1009,1010,1013,
1014,1015,1016,1017,1018,1019,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.761,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1004,1005,1006,1007,1008,1009,1010,1013,1014,1015,1016,1017,1018,
1019,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.761,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20113.0>
[ns_server:debug,2014-08-19T16:49:49.763,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.767,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3295 us
[ns_server:debug,2014-08-19T16:49:49.767,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.767,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.768,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1009,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.770,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1003 state to replica
[ns_server:info,2014-08-19T16:49:49.770,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1003,1004,1005,1006,1007,1008,1009,1010,1013,1014,1015,1016,1017,1018,1019,
1021,1022,1023] ([1003], [])
[ns_server:debug,2014-08-19T16:49:49.777,ns_1@10.242.238.90:<0.20114.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1013,
1014,1015,1016,1017,1018,1019,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.222085>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1013,1014,1015,1016,
1017,1018,1019,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.777,ns_1@10.242.238.90:<0.20114.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20112.0>
[ns_server:info,2014-08-19T16:49:49.777,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.787,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.787,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20116.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20116.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.788,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.789,ns_1@10.242.238.90:<0.20112.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.789,ns_1@10.242.238.90:<0.20114.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20112.0>
[ns_server:debug,2014-08-19T16:49:49.789,ns_1@10.242.238.90:<0.20114.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.789,ns_1@10.242.238.90:<0.20118.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.789,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20112.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20113.0>,<<"cut off">>,<<"cut off">>,[],55,false,false,0,
{1408,452589,788040},
completed,
{<0.20114.0>,#Ref<0.0.0.222099>},
<<"replication_ns_1@10.242.238.90">>,<0.20112.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.790,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20114.0>,{#Ref<0.0.0.222087>,<0.20118.0>}}
[error_logger:info,2014-08-19T16:49:49.789,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20118.0>},
{name,
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1013,
1014,1015,1016,1017,1018,1019,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1003,1004,1005,1006,1007,1008,1009,1010,
1013,1014,1015,1016,1017,1018,1019,1021,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.793,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1013,1014,1015,1016,1017,
1018,1019,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.794,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20120.0>
[ns_server:debug,2014-08-19T16:49:49.794,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.798,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3602 us
[ns_server:debug,2014-08-19T16:49:49.798,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.798,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.799,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1003,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.805,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1012 state to replica
[ns_server:info,2014-08-19T16:49:49.805,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1003,1004,1005,1006,1007,1008,1009,1010,1012,1013,1014,1015,1016,1017,1018,
1019,1021,1022,1023] ([1012], [])
[ns_server:debug,2014-08-19T16:49:49.806,ns_1@10.242.238.90:<0.20121.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1012,
1013,1014,1015,1016,1017,1018,1019,1021,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.222226>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1012,1013,1014,1015,
1016,1017,1018,1019,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.806,ns_1@10.242.238.90:<0.20121.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20118.0>
[ns_server:info,2014-08-19T16:49:49.806,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.810,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20123.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.811,ns_1@10.242.238.90:<0.20123.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20118.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20121.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20118.0>
[ns_server:debug,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20121.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20125.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.812,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20118.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20120.0>,<<"cut off">>,<<"cut off">>,[],58,false,false,0,
{1408,452589,811224},
completed,
{<0.20121.0>,#Ref<0.0.0.222239>},
<<"replication_ns_1@10.242.238.90">>,<0.20118.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[error_logger:info,2014-08-19T16:49:49.813,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20125.0>},
{name,
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1012,
1013,1014,1015,1016,1017,1018,1019,1021,1022,
1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1003,1004,1005,1006,1007,1008,1009,1010,
1012,1013,1014,1015,1016,1017,1018,1019,
1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.813,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20121.0>,{#Ref<0.0.0.222228>,<0.20125.0>}}
[ns_server:debug,2014-08-19T16:49:49.816,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1012,1013,1014,1015,1016,
1017,1018,1019,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.817,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20126.0>
[ns_server:debug,2014-08-19T16:49:49.818,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.820,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.820,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1334 us
[ns_server:debug,2014-08-19T16:49:49.820,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.821,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1012,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.823,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1020 state to replica
[ns_server:info,2014-08-19T16:49:49.823,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1003,1004,1005,1006,1007,1008,1009,1010,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023] ([1020], [])
[ns_server:debug,2014-08-19T16:49:49.826,ns_1@10.242.238.90:<0.20128.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.222360>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.827,ns_1@10.242.238.90:<0.20128.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20125.0>
[ns_server:info,2014-08-19T16:49:49.827,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.831,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.831,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.831,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.831,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20130.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20130.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20125.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.832,ns_1@10.242.238.90:<0.20128.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20125.0>
[ns_server:debug,2014-08-19T16:49:49.833,ns_1@10.242.238.90:<0.20128.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.833,ns_1@10.242.238.90:<0.20132.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.833,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20125.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20126.0>,<<"cut off">>,<<"cut off">>,[],61,false,false,0,
{1408,452589,831831},
completed,
{<0.20128.0>,#Ref<0.0.0.222373>},
<<"replication_ns_1@10.242.238.90">>,<0.20125.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.833,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20128.0>,{#Ref<0.0.0.222362>,<0.20132.0>}}
[error_logger:info,2014-08-19T16:49:49.833,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20132.0>},
{name,
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1003,1004,1005,1006,1007,1008,1009,1010,
1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.837,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.837,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20133.0>
[ns_server:debug,2014-08-19T16:49:49.837,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.840,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2906 us
[ns_server:debug,2014-08-19T16:49:49.841,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.842,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.843,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1020,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.847,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1011 state to replica
[ns_server:info,2014-08-19T16:49:49.847,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023] ([1011], [])
[ns_server:debug,2014-08-19T16:49:49.848,ns_1@10.242.238.90:<0.20134.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.222502>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.848,ns_1@10.242.238.90:<0.20134.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20132.0>
[ns_server:info,2014-08-19T16:49:49.849,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.852,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.853,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.853,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.853,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.853,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20136.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20136.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20132.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.854,ns_1@10.242.238.90:<0.20134.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20132.0>
[ns_server:debug,2014-08-19T16:49:49.855,ns_1@10.242.238.90:<0.20134.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.855,ns_1@10.242.238.90:<0.20138.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.855,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20132.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20133.0>,<<"cut off">>,<<"cut off">>,[],64,false,false,0,
{1408,452589,853680},
completed,
{<0.20134.0>,#Ref<0.0.0.222516>},
<<"replication_ns_1@10.242.238.90">>,<0.20132.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.855,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20134.0>,{#Ref<0.0.0.222504>,<0.20138.0>}}
[error_logger:info,2014-08-19T16:49:49.855,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20138.0>},
{name,
{new_child_id,
[1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.862,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[ns_server:debug,2014-08-19T16:49:49.862,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:49:49.862,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20140.0>
[ns_server:debug,2014-08-19T16:49:49.864,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.864,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2471 us
[ns_server:debug,2014-08-19T16:49:49.865,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.866,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1011,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.871,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1002 state to replica
[ns_server:info,2014-08-19T16:49:49.872,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023] ([1002], [])
[ns_server:debug,2014-08-19T16:49:49.874,ns_1@10.242.238.90:<0.20141.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.222640>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:49:49.875,ns_1@10.242.238.90:<0.20141.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20138.0>
[ns_server:info,2014-08-19T16:49:49.875,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:49:49.878,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:49:49.879,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:49:49.879,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:49:49.879,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:49:49.879,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:49:49.879,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:49:49.879,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:49:49.879,ns_1@10.242.238.90:<0.20143.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:49:49.880,ns_1@10.242.238.90:<0.20143.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:49:49.880,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:49:49.880,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:49:49.880,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:49:49.880,ns_1@10.242.238.90:<0.20138.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:49:49.880,ns_1@10.242.238.90:<0.20141.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20138.0>
[ns_server:debug,2014-08-19T16:49:49.881,ns_1@10.242.238.90:<0.20141.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:49:49.881,ns_1@10.242.238.90:<0.20145.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:49:49.881,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20138.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20140.0>,<<"cut off">>,<<"cut off">>,[],67,false,false,0,
{1408,452589,879469},
completed,
{<0.20141.0>,#Ref<0.0.0.222653>},
<<"replication_ns_1@10.242.238.90">>,<0.20138.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:49:49.881,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.20141.0>,{#Ref<0.0.0.222642>,<0.20145.0>}}
[error_logger:info,2014-08-19T16:49:49.881,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.20145.0>},
{name,
{new_child_id,
[1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:49:49.884,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:49:49.885,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20146.0>
[ns_server:debug,2014-08-19T16:49:49.885,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.893,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.894,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7356 us
[ns_server:debug,2014-08-19T16:49:49.894,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.894,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1002,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:49:49.914,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:49:49.918,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2255 us
[ns_server:debug,2014-08-19T16:49:49.918,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.919,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:49:49.919,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{756,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:49:49.933,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1001 state to replica
[ns_server:info,2014-08-19T16:49:49.937,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1001 to state replica
[ns_server:debug,2014-08-19T16:49:49.977,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1001_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:49.978,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1001]},
{checkpoints,[{1001,0}]},
{name,<<"replication_building_1001_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1001]},
{takeover,false},
{suffix,"building_1001_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1001,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:49.979,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20150.0>
[rebalance:debug,2014-08-19T16:49:49.979,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:49.980,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.29965.0>,#Ref<16550.0.1.52364>}]}
[rebalance:info,2014-08-19T16:49:49.980,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1001
[rebalance:debug,2014-08-19T16:49:49.981,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.29965.0>,#Ref<16550.0.1.52364>}]
[ns_server:debug,2014-08-19T16:49:49.982,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:49.998,ns_1@10.242.238.90:<0.20165.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1001
[ns_server:info,2014-08-19T16:49:50.004,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 746 state to replica
[ns_server:info,2014-08-19T16:49:50.010,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 746 to state replica
[ns_server:debug,2014-08-19T16:49:50.058,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_746_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.059,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[746]},
{checkpoints,[{746,0}]},
{name,<<"replication_building_746_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[746]},
{takeover,false},
{suffix,"building_746_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",746,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:50.060,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20169.0>
[rebalance:debug,2014-08-19T16:49:50.060,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.061,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.29985.0>,#Ref<16550.0.1.52490>}]}
[rebalance:info,2014-08-19T16:49:50.061,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 746
[rebalance:debug,2014-08-19T16:49:50.061,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.29985.0>,#Ref<16550.0.1.52490>}]
[ns_server:debug,2014-08-19T16:49:50.062,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:50.062,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20170.0> (ok)
[rebalance:debug,2014-08-19T16:49:50.063,ns_1@10.242.238.90:<0.20171.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 746
[ns_server:debug,2014-08-19T16:49:50.075,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1001. Nacking mccouch update.
[views:debug,2014-08-19T16:49:50.075,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1001. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:50.075,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1001,replica,0}
[ns_server:debug,2014-08-19T16:49:50.075,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,756,1019,1003,759,1022,1006,762,1009,765,749,1012,752,
1015,755,1018,1002,758,1021,1005,761,1008,764,748,1011,767,751,1014,754,1017,
1001,757,1020,1004,760,1023,1007,763,747,1010]
[ns_server:info,2014-08-19T16:49:50.145,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1000 state to replica
[ns_server:info,2014-08-19T16:49:50.149,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 1000 to state replica
[views:debug,2014-08-19T16:49:50.159,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1001. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:50.159,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1001,replica,0}
[ns_server:debug,2014-08-19T16:49:50.185,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_1000_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.187,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[1000]},
{checkpoints,[{1000,0}]},
{name,<<"replication_building_1000_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[1000]},
{takeover,false},
{suffix,"building_1000_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",1000,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:50.188,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20175.0>
[rebalance:debug,2014-08-19T16:49:50.188,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.188,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30042.0>,#Ref<16550.0.1.52791>}]}
[rebalance:info,2014-08-19T16:49:50.189,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 1000
[rebalance:debug,2014-08-19T16:49:50.189,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30042.0>,#Ref<16550.0.1.52791>}]
[ns_server:debug,2014-08-19T16:49:50.190,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:50.206,ns_1@10.242.238.90:<0.20176.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1000
[ns_server:info,2014-08-19T16:49:50.212,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 745 state to replica
[ns_server:info,2014-08-19T16:49:50.219,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 745 to state replica
[ns_server:debug,2014-08-19T16:49:50.269,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_745_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.271,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[745]},
{checkpoints,[{745,0}]},
{name,<<"replication_building_745_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[745]},
{takeover,false},
{suffix,"building_745_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",745,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:50.271,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20194.0>
[rebalance:debug,2014-08-19T16:49:50.271,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.272,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30062.0>,#Ref<16550.0.1.52907>}]}
[rebalance:info,2014-08-19T16:49:50.272,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 745
[rebalance:debug,2014-08-19T16:49:50.273,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30062.0>,#Ref<16550.0.1.52907>}]
[ns_server:debug,2014-08-19T16:49:50.273,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20195.0> (ok)
[ns_server:debug,2014-08-19T16:49:50.273,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:50.275,ns_1@10.242.238.90:<0.20196.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 745
[ns_server:debug,2014-08-19T16:49:50.285,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 1000. Nacking mccouch update.
[views:debug,2014-08-19T16:49:50.285,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1000. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:50.285,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1000,replica,0}
[ns_server:debug,2014-08-19T16:49:50.286,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,1009,765,749,1012,
752,1015,755,1018,1002,758,1021,1005,761,1008,764,748,1011,767,751,1014,754,
1017,1001,757,1020,1004,760,1023,1007,763,747,1010]
[ns_server:info,2014-08-19T16:49:50.348,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 999 state to replica
[ns_server:info,2014-08-19T16:49:50.352,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 999 to state replica
[views:debug,2014-08-19T16:49:50.361,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/1000. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:50.361,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",1000,replica,0}
[ns_server:debug,2014-08-19T16:49:50.388,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_999_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.389,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[999]},
{checkpoints,[{999,0}]},
{name,<<"replication_building_999_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[999]},
{takeover,false},
{suffix,"building_999_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",999,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:50.390,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20200.0>
[rebalance:debug,2014-08-19T16:49:50.390,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.391,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30120.0>,#Ref<16550.0.1.53195>}]}
[rebalance:info,2014-08-19T16:49:50.391,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 999
[rebalance:debug,2014-08-19T16:49:50.391,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30120.0>,#Ref<16550.0.1.53195>}]
[ns_server:debug,2014-08-19T16:49:50.392,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:50.412,ns_1@10.242.238.90:<0.20215.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 999
[ns_server:info,2014-08-19T16:49:50.418,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 744 state to replica
[ns_server:info,2014-08-19T16:49:50.424,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 744 to state replica
[ns_server:debug,2014-08-19T16:49:50.443,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 746. Nacking mccouch update.
[views:debug,2014-08-19T16:49:50.443,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/746. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:50.443,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",746,pending,0}
[ns_server:debug,2014-08-19T16:49:50.444,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,749,
1012,752,1015,755,1018,1002,758,1021,1005,761,1008,764,748,1011,767,751,1014,
754,1017,1001,757,1020,1004,760,1023,1007,763,747,1010]
[ns_server:debug,2014-08-19T16:49:50.471,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_744_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.473,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[744]},
{checkpoints,[{744,0}]},
{name,<<"replication_building_744_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[744]},
{takeover,false},
{suffix,"building_744_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",744,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:50.474,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20219.0>
[rebalance:debug,2014-08-19T16:49:50.474,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.474,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30140.0>,#Ref<16550.0.1.53336>}]}
[rebalance:info,2014-08-19T16:49:50.475,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 744
[rebalance:debug,2014-08-19T16:49:50.475,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30140.0>,#Ref<16550.0.1.53336>}]
[ns_server:debug,2014-08-19T16:49:50.475,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20220.0> (ok)
[ns_server:debug,2014-08-19T16:49:50.476,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:50.476,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/746. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:50.476,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",746,pending,0}
[rebalance:debug,2014-08-19T16:49:50.477,ns_1@10.242.238.90:<0.20221.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 744
[rebalance:debug,2014-08-19T16:49:50.477,ns_1@10.242.238.90:<0.20171.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:50.477,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20171.0> (ok)
[ns_server:info,2014-08-19T16:49:50.549,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 998 state to replica
[ns_server:info,2014-08-19T16:49:50.553,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 998 to state replica
[ns_server:debug,2014-08-19T16:49:50.577,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 744. Nacking mccouch update.
[views:debug,2014-08-19T16:49:50.577,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/744. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:50.577,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",744,pending,0}
[ns_server:debug,2014-08-19T16:49:50.577,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,749,
1012,752,1015,755,1018,1002,758,1021,1005,761,1008,764,748,1011,767,751,1014,
754,1017,1001,757,1020,1004,760,744,1023,1007,763,747,1010]
[ns_server:debug,2014-08-19T16:49:50.586,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_998_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.588,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[998]},
{checkpoints,[{998,0}]},
{name,<<"replication_building_998_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[998]},
{takeover,false},
{suffix,"building_998_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",998,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:50.588,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20239.0>
[rebalance:debug,2014-08-19T16:49:50.589,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.589,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30197.0>,#Ref<16550.0.1.53601>}]}
[rebalance:info,2014-08-19T16:49:50.589,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 998
[rebalance:debug,2014-08-19T16:49:50.590,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30197.0>,#Ref<16550.0.1.53601>}]
[ns_server:debug,2014-08-19T16:49:50.591,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:50.608,ns_1@10.242.238.90:<0.20240.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 998
[views:debug,2014-08-19T16:49:50.611,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/744. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:50.611,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",744,pending,0}
[ns_server:info,2014-08-19T16:49:50.614,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 743 state to replica
[ns_server:info,2014-08-19T16:49:50.620,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 743 to state replica
[ns_server:debug,2014-08-19T16:49:50.669,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_743_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.671,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[743]},
{checkpoints,[{743,0}]},
{name,<<"replication_building_743_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[743]},
{takeover,false},
{suffix,"building_743_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",743,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:50.671,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20258.0>
[rebalance:debug,2014-08-19T16:49:50.672,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.672,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30217.0>,#Ref<16550.0.1.53695>}]}
[rebalance:info,2014-08-19T16:49:50.672,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 743
[rebalance:debug,2014-08-19T16:49:50.673,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30217.0>,#Ref<16550.0.1.53695>}]
[ns_server:debug,2014-08-19T16:49:50.673,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20259.0> (ok)
[ns_server:debug,2014-08-19T16:49:50.673,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:50.675,ns_1@10.242.238.90:<0.20260.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 743
[ns_server:debug,2014-08-19T16:49:50.711,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 999. Nacking mccouch update.
[views:debug,2014-08-19T16:49:50.711,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/999. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:50.711,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",999,replica,0}
[ns_server:debug,2014-08-19T16:49:50.712,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,749,
1012,999,752,1015,755,1018,1002,758,1021,1005,761,1008,764,748,1011,767,751,
1014,754,1017,1001,757,1020,1004,760,744,1023,1007,763,747,1010]
[ns_server:info,2014-08-19T16:49:50.748,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 997 state to replica
[ns_server:info,2014-08-19T16:49:50.752,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 997 to state replica
[views:debug,2014-08-19T16:49:50.762,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/999. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:50.762,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",999,replica,0}
[ns_server:debug,2014-08-19T16:49:50.791,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_997_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.793,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[997]},
{checkpoints,[{997,0}]},
{name,<<"replication_building_997_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[997]},
{takeover,false},
{suffix,"building_997_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",997,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:50.794,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20264.0>
[rebalance:debug,2014-08-19T16:49:50.794,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.794,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30274.0>,#Ref<16550.0.1.53960>}]}
[rebalance:info,2014-08-19T16:49:50.794,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 997
[rebalance:debug,2014-08-19T16:49:50.795,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30274.0>,#Ref<16550.0.1.53960>}]
[ns_server:debug,2014-08-19T16:49:50.796,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:50.812,ns_1@10.242.238.90:<0.20270.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 997
[ns_server:info,2014-08-19T16:49:50.818,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 742 state to replica
[ns_server:info,2014-08-19T16:49:50.824,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 742 to state replica
[ns_server:debug,2014-08-19T16:49:50.845,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 745. Nacking mccouch update.
[views:debug,2014-08-19T16:49:50.845,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/745. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:50.846,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",745,pending,0}
[ns_server:debug,2014-08-19T16:49:50.846,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,749,
1012,999,752,1015,755,1018,1002,758,1021,1005,761,745,1008,764,748,1011,767,
751,1014,754,1017,1001,757,1020,1004,760,744,1023,1007,763,747,1010]
[ns_server:debug,2014-08-19T16:49:50.876,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_742_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.877,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[742]},
{checkpoints,[{742,0}]},
{name,<<"replication_building_742_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[742]},
{takeover,false},
{suffix,"building_742_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",742,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:50.878,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20283.0>
[rebalance:debug,2014-08-19T16:49:50.878,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.878,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30294.0>,#Ref<16550.0.1.54075>}]}
[rebalance:info,2014-08-19T16:49:50.879,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 742
[rebalance:debug,2014-08-19T16:49:50.879,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30294.0>,#Ref<16550.0.1.54075>}]
[views:debug,2014-08-19T16:49:50.879,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/745. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:50.879,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",745,pending,0}
[ns_server:debug,2014-08-19T16:49:50.879,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20284.0> (ok)
[ns_server:debug,2014-08-19T16:49:50.879,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:50.881,ns_1@10.242.238.90:<0.20285.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 742
[ns_server:info,2014-08-19T16:49:50.954,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 996 state to replica
[ns_server:info,2014-08-19T16:49:50.957,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 996 to state replica
[ns_server:debug,2014-08-19T16:49:50.995,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_996_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:50.996,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[996]},
{checkpoints,[{996,0}]},
{name,<<"replication_building_996_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[996]},
{takeover,false},
{suffix,"building_996_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",996,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:50.997,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20303.0>
[rebalance:debug,2014-08-19T16:49:50.997,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:50.998,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30351.0>,#Ref<16550.0.1.54340>}]}
[rebalance:info,2014-08-19T16:49:50.998,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 996
[rebalance:debug,2014-08-19T16:49:50.998,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30351.0>,#Ref<16550.0.1.54340>}]
[ns_server:debug,2014-08-19T16:49:50.999,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:51.003,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 998. Nacking mccouch update.
[views:debug,2014-08-19T16:49:51.003,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/998. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:51.003,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",998,replica,0}
[ns_server:debug,2014-08-19T16:49:51.003,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,749,
1012,999,752,1015,755,1018,1002,758,1021,1005,761,745,1008,764,748,1011,998,
767,751,1014,754,1017,1001,757,1020,1004,760,744,1023,1007,763,747,1010]
[rebalance:debug,2014-08-19T16:49:51.019,ns_1@10.242.238.90:<0.20304.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 996
[ns_server:info,2014-08-19T16:49:51.025,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 741 state to replica
[ns_server:info,2014-08-19T16:49:51.031,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 741 to state replica
[views:debug,2014-08-19T16:49:51.053,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/998. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:51.053,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",998,replica,0}
[ns_server:debug,2014-08-19T16:49:51.079,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_741_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.081,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[741]},
{checkpoints,[{741,0}]},
{name,<<"replication_building_741_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[741]},
{takeover,false},
{suffix,"building_741_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",741,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:51.082,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20308.0>
[rebalance:debug,2014-08-19T16:49:51.082,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.082,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30371.0>,#Ref<16550.0.1.54477>}]}
[rebalance:info,2014-08-19T16:49:51.082,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 741
[rebalance:debug,2014-08-19T16:49:51.083,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30371.0>,#Ref<16550.0.1.54477>}]
[ns_server:debug,2014-08-19T16:49:51.083,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20309.0> (ok)
[ns_server:debug,2014-08-19T16:49:51.083,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:51.085,ns_1@10.242.238.90:<0.20310.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 741
[ns_server:info,2014-08-19T16:49:51.156,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 995 state to replica
[ns_server:info,2014-08-19T16:49:51.160,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 995 to state replica
[ns_server:debug,2014-08-19T16:49:51.178,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 742. Nacking mccouch update.
[views:debug,2014-08-19T16:49:51.179,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/742. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:51.179,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",742,pending,0}
[ns_server:debug,2014-08-19T16:49:51.179,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,749,
1012,999,752,1015,755,1018,1002,758,742,1021,1005,761,745,1008,764,748,1011,
998,767,751,1014,754,1017,1001,757,1020,1004,760,744,1023,1007,763,747,1010]
[ns_server:debug,2014-08-19T16:49:51.195,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_995_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.196,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[995]},
{checkpoints,[{995,0}]},
{name,<<"replication_building_995_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[995]},
{takeover,false},
{suffix,"building_995_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",995,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:51.197,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20328.0>
[rebalance:debug,2014-08-19T16:49:51.197,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.198,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30437.0>,#Ref<16550.0.1.54794>}]}
[rebalance:info,2014-08-19T16:49:51.198,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 995
[rebalance:debug,2014-08-19T16:49:51.198,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30437.0>,#Ref<16550.0.1.54794>}]
[ns_server:debug,2014-08-19T16:49:51.199,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:51.219,ns_1@10.242.238.90:<0.20329.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 995
[ns_server:info,2014-08-19T16:49:51.225,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 740 state to replica
[ns_server:info,2014-08-19T16:49:51.230,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 740 to state replica
[views:debug,2014-08-19T16:49:51.246,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/742. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:51.246,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",742,pending,0}
[rebalance:debug,2014-08-19T16:49:51.247,ns_1@10.242.238.90:<0.20215.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:51.247,ns_1@10.242.238.90:<0.20176.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:51.247,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20215.0> (ok)
[ns_server:debug,2014-08-19T16:49:51.247,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20176.0> (ok)
[ns_server:debug,2014-08-19T16:49:51.279,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_740_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.281,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[740]},
{checkpoints,[{740,0}]},
{name,<<"replication_building_740_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[740]},
{takeover,false},
{suffix,"building_740_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",740,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:51.281,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20333.0>
[rebalance:debug,2014-08-19T16:49:51.282,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.282,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30457.0>,#Ref<16550.0.1.54910>}]}
[rebalance:info,2014-08-19T16:49:51.282,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 740
[rebalance:debug,2014-08-19T16:49:51.283,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30457.0>,#Ref<16550.0.1.54910>}]
[ns_server:debug,2014-08-19T16:49:51.283,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20334.0> (ok)
[ns_server:debug,2014-08-19T16:49:51.283,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:51.285,ns_1@10.242.238.90:<0.20335.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 740
[ns_server:info,2014-08-19T16:49:51.359,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 994 state to replica
[ns_server:info,2014-08-19T16:49:51.363,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 994 to state replica
[ns_server:debug,2014-08-19T16:49:51.397,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_994_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.398,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[994]},
{checkpoints,[{994,0}]},
{name,<<"replication_building_994_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[994]},
{takeover,false},
{suffix,"building_994_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",994,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:51.399,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20359.0>
[rebalance:debug,2014-08-19T16:49:51.399,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.399,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30524.0>,#Ref<16550.0.1.55226>}]}
[rebalance:info,2014-08-19T16:49:51.400,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 994
[rebalance:debug,2014-08-19T16:49:51.400,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30524.0>,#Ref<16550.0.1.55226>}]
[ns_server:debug,2014-08-19T16:49:51.400,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:51.420,ns_1@10.242.238.90:<0.20360.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 994
[ns_server:info,2014-08-19T16:49:51.426,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 739 state to replica
[ns_server:info,2014-08-19T16:49:51.433,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 739 to state replica
[ns_server:debug,2014-08-19T16:49:51.446,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 997. Nacking mccouch update.
[views:debug,2014-08-19T16:49:51.446,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/997. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:51.446,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",997,replica,0}
[ns_server:debug,2014-08-19T16:49:51.446,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,
749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,761,745,1008,764,748,
1011,998,767,751,1014,754,1017,1001,757,1020,1004,760,744,1023,1007,763,747,
1010]
[ns_server:debug,2014-08-19T16:49:51.484,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_739_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.485,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[739]},
{checkpoints,[{739,0}]},
{name,<<"replication_building_739_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[739]},
{takeover,false},
{suffix,"building_739_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",739,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:51.486,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20365.0>
[rebalance:debug,2014-08-19T16:49:51.486,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.487,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30544.0>,#Ref<16550.0.1.55343>}]}
[rebalance:info,2014-08-19T16:49:51.487,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 739
[rebalance:debug,2014-08-19T16:49:51.487,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30544.0>,#Ref<16550.0.1.55343>}]
[ns_server:debug,2014-08-19T16:49:51.488,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:51.488,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20366.0> (ok)
[rebalance:debug,2014-08-19T16:49:51.489,ns_1@10.242.238.90:<0.20367.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 739
[views:debug,2014-08-19T16:49:51.522,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/997. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:51.522,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",997,replica,0}
[ns_server:info,2014-08-19T16:49:51.563,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 993 state to replica
[ns_server:info,2014-08-19T16:49:51.567,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 993 to state replica
[ns_server:debug,2014-08-19T16:49:51.602,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_993_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.603,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[993]},
{checkpoints,[{993,0}]},
{name,<<"replication_building_993_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[993]},
{takeover,false},
{suffix,"building_993_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",993,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:51.604,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20385.0>
[rebalance:debug,2014-08-19T16:49:51.604,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.605,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30602.0>,#Ref<16550.0.1.55641>}]}
[rebalance:info,2014-08-19T16:49:51.605,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 993
[rebalance:debug,2014-08-19T16:49:51.605,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30602.0>,#Ref<16550.0.1.55641>}]
[ns_server:debug,2014-08-19T16:49:51.606,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:51.624,ns_1@10.242.238.90:<0.20386.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 993
[ns_server:info,2014-08-19T16:49:51.629,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 738 state to replica
[ns_server:info,2014-08-19T16:49:51.636,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 738 to state replica
[ns_server:debug,2014-08-19T16:49:51.647,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 995. Nacking mccouch update.
[views:debug,2014-08-19T16:49:51.647,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/995. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:51.647,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",995,replica,0}
[ns_server:debug,2014-08-19T16:49:51.647,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,1019,1003,759,1022,1006,762,746,1009,765,
749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,761,745,1008,995,764,
748,1011,998,767,751,1014,754,1017,1001,757,1020,1004,760,744,1023,1007,763,
747,1010]
[ns_server:debug,2014-08-19T16:49:51.685,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_738_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.686,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[738]},
{checkpoints,[{738,0}]},
{name,<<"replication_building_738_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[738]},
{takeover,false},
{suffix,"building_738_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",738,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:51.687,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20390.0>
[rebalance:debug,2014-08-19T16:49:51.687,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.688,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30636.0>,#Ref<16550.0.1.55789>}]}
[rebalance:info,2014-08-19T16:49:51.688,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 738
[rebalance:debug,2014-08-19T16:49:51.688,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30636.0>,#Ref<16550.0.1.55789>}]
[ns_server:debug,2014-08-19T16:49:51.689,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20391.0> (ok)
[ns_server:debug,2014-08-19T16:49:51.689,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:51.691,ns_1@10.242.238.90:<0.20392.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 738
[views:debug,2014-08-19T16:49:51.714,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/995. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:51.714,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",995,replica,0}
[ns_server:info,2014-08-19T16:49:51.764,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 992 state to replica
[ns_server:info,2014-08-19T16:49:51.768,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 992 to state replica
[ns_server:debug,2014-08-19T16:49:51.803,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_992_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.804,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[992]},
{checkpoints,[{992,0}]},
{name,<<"replication_building_992_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[992]},
{takeover,false},
{suffix,"building_992_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",992,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:51.805,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20410.0>
[rebalance:debug,2014-08-19T16:49:51.805,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.806,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30679.0>,#Ref<16550.0.1.56024>}]}
[rebalance:info,2014-08-19T16:49:51.806,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 992
[rebalance:debug,2014-08-19T16:49:51.806,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30679.0>,#Ref<16550.0.1.56024>}]
[ns_server:debug,2014-08-19T16:49:51.807,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:51.813,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 743. Nacking mccouch update.
[views:debug,2014-08-19T16:49:51.813,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/743. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:51.813,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",743,pending,0}
[ns_server:debug,2014-08-19T16:49:51.813,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,1019,1003,759,743,1022,1006,762,746,1009,
765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,761,745,1008,995,
764,748,1011,998,767,751,1014,754,1017,1001,757,1020,1004,760,744,1023,1007,
763,747,1010]
[rebalance:debug,2014-08-19T16:49:51.825,ns_1@10.242.238.90:<0.20411.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 992
[ns_server:info,2014-08-19T16:49:51.830,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 737 state to replica
[ns_server:info,2014-08-19T16:49:51.837,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 737 to state replica
[views:debug,2014-08-19T16:49:51.847,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/743. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:51.847,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",743,pending,0}
[ns_server:debug,2014-08-19T16:49:51.884,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_737_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:51.886,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[737]},
{checkpoints,[{737,0}]},
{name,<<"replication_building_737_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[737]},
{takeover,false},
{suffix,"building_737_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",737,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:51.887,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20415.0>
[rebalance:debug,2014-08-19T16:49:51.887,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:51.887,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30713.0>,#Ref<16550.0.1.56167>}]}
[rebalance:info,2014-08-19T16:49:51.887,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 737
[rebalance:debug,2014-08-19T16:49:51.888,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30713.0>,#Ref<16550.0.1.56167>}]
[ns_server:debug,2014-08-19T16:49:51.888,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20416.0> (ok)
[ns_server:debug,2014-08-19T16:49:51.888,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:51.890,ns_1@10.242.238.90:<0.20417.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 737
[ns_server:debug,2014-08-19T16:49:51.939,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 741. Nacking mccouch update.
[views:debug,2014-08-19T16:49:51.939,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/741. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:51.939,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",741,pending,0}
[ns_server:debug,2014-08-19T16:49:51.939,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,1019,1003,759,743,1022,1006,762,746,1009,
765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,761,745,1008,995,
764,748,1011,998,767,751,1014,754,1017,1001,757,741,1020,1004,760,744,1023,
1007,763,747,1010]
[ns_server:info,2014-08-19T16:49:51.969,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 991 state to replica
[views:debug,2014-08-19T16:49:51.973,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/741. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:51.973,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",741,pending,0}
[ns_server:info,2014-08-19T16:49:51.973,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 991 to state replica
[ns_server:debug,2014-08-19T16:49:52.007,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_991_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.009,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[991]},
{checkpoints,[{991,0}]},
{name,<<"replication_building_991_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[991]},
{takeover,false},
{suffix,"building_991_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",991,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:52.010,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20435.0>
[rebalance:debug,2014-08-19T16:49:52.010,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.010,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30756.0>,#Ref<16550.0.1.56405>}]}
[rebalance:info,2014-08-19T16:49:52.010,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 991
[rebalance:debug,2014-08-19T16:49:52.011,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30756.0>,#Ref<16550.0.1.56405>}]
[ns_server:debug,2014-08-19T16:49:52.012,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.031,ns_1@10.242.238.90:<0.20436.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 991
[ns_server:info,2014-08-19T16:49:52.037,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 736 state to replica
[ns_server:info,2014-08-19T16:49:52.045,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 736 to state replica
[ns_server:debug,2014-08-19T16:49:52.073,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 996. Nacking mccouch update.
[views:debug,2014-08-19T16:49:52.073,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/996. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:52.074,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",996,replica,0}
[ns_server:debug,2014-08-19T16:49:52.074,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,1019,1003,759,743,1022,1006,762,746,1009,
996,765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,761,745,1008,
995,764,748,1011,998,767,751,1014,754,1017,1001,757,741,1020,1004,760,744,
1023,1007,763,747,1010]
[ns_server:debug,2014-08-19T16:49:52.094,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_736_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.096,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[736]},
{checkpoints,[{736,0}]},
{name,<<"replication_building_736_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[736]},
{takeover,false},
{suffix,"building_736_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",736,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:52.096,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20454.0>
[rebalance:debug,2014-08-19T16:49:52.096,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.097,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30790.0>,#Ref<16550.0.1.56547>}]}
[rebalance:info,2014-08-19T16:49:52.097,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 736
[rebalance:debug,2014-08-19T16:49:52.097,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30790.0>,#Ref<16550.0.1.56547>}]
[ns_server:debug,2014-08-19T16:49:52.098,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20455.0> (ok)
[ns_server:debug,2014-08-19T16:49:52.098,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.099,ns_1@10.242.238.90:<0.20456.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 736
[views:debug,2014-08-19T16:49:52.107,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/996. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:52.107,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",996,replica,0}
[ns_server:info,2014-08-19T16:49:52.175,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 990 state to replica
[ns_server:info,2014-08-19T16:49:52.179,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 990 to state replica
[ns_server:debug,2014-08-19T16:49:52.191,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 994. Nacking mccouch update.
[views:debug,2014-08-19T16:49:52.191,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/994. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:52.192,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",994,replica,0}
[ns_server:debug,2014-08-19T16:49:52.192,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,1019,1003,759,743,1022,1006,762,746,1009,
996,765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,761,745,1008,
995,764,748,1011,998,767,751,1014,754,1017,1001,757,741,1020,1004,760,744,
1023,1007,994,763,747,1010]
[ns_server:debug,2014-08-19T16:49:52.213,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_990_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.214,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[990]},
{checkpoints,[{990,0}]},
{name,<<"replication_building_990_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[990]},
{takeover,false},
{suffix,"building_990_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",990,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:52.215,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20474.0>
[rebalance:debug,2014-08-19T16:49:52.215,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.216,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30833.0>,#Ref<16550.0.1.56775>}]}
[rebalance:info,2014-08-19T16:49:52.216,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 990
[rebalance:debug,2014-08-19T16:49:52.216,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30833.0>,#Ref<16550.0.1.56775>}]
[ns_server:debug,2014-08-19T16:49:52.217,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:52.225,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/994. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:52.225,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",994,replica,0}
[rebalance:debug,2014-08-19T16:49:52.235,ns_1@10.242.238.90:<0.20475.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 990
[ns_server:info,2014-08-19T16:49:52.242,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 735 state to replica
[ns_server:info,2014-08-19T16:49:52.248,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 735 to state replica
[ns_server:debug,2014-08-19T16:49:52.298,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_735_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.299,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[735]},
{checkpoints,[{735,0}]},
{name,<<"replication_building_735_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[735]},
{takeover,false},
{suffix,"building_735_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",735,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:52.300,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20493.0>
[rebalance:debug,2014-08-19T16:49:52.300,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.300,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30853.0>,#Ref<16550.0.1.56889>}]}
[rebalance:info,2014-08-19T16:49:52.300,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 735
[rebalance:debug,2014-08-19T16:49:52.301,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30853.0>,#Ref<16550.0.1.56889>}]
[ns_server:debug,2014-08-19T16:49:52.301,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20494.0> (ok)
[ns_server:debug,2014-08-19T16:49:52.301,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.303,ns_1@10.242.238.90:<0.20495.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 735
[ns_server:debug,2014-08-19T16:49:52.323,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 992. Nacking mccouch update.
[views:debug,2014-08-19T16:49:52.323,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/992. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:52.323,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",992,replica,0}
[ns_server:debug,2014-08-19T16:49:52.323,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,1019,1003,759,743,1022,1006,762,746,1009,
996,765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,992,761,745,
1008,995,764,748,1011,998,767,751,1014,754,1017,1001,757,741,1020,1004,760,
744,1023,1007,994,763,747,1010]
[ns_server:info,2014-08-19T16:49:52.375,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 989 state to replica
[ns_server:info,2014-08-19T16:49:52.379,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 989 to state replica
[views:debug,2014-08-19T16:49:52.399,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/992. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:52.399,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",992,replica,0}
[ns_server:debug,2014-08-19T16:49:52.412,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_989_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.414,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[989]},
{checkpoints,[{989,0}]},
{name,<<"replication_building_989_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[989]},
{takeover,false},
{suffix,"building_989_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",989,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:52.415,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20499.0>
[rebalance:debug,2014-08-19T16:49:52.415,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.415,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30910.0>,#Ref<16550.0.1.57170>}]}
[rebalance:info,2014-08-19T16:49:52.415,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 989
[rebalance:debug,2014-08-19T16:49:52.416,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30910.0>,#Ref<16550.0.1.57170>}]
[ns_server:debug,2014-08-19T16:49:52.417,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.436,ns_1@10.242.238.90:<0.20500.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 989
[ns_server:info,2014-08-19T16:49:52.442,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 734 state to replica
[ns_server:info,2014-08-19T16:49:52.448,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 734 to state replica
[ns_server:debug,2014-08-19T16:49:52.497,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_734_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.498,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[734]},
{checkpoints,[{734,0}]},
{name,<<"replication_building_734_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[734]},
{takeover,false},
{suffix,"building_734_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",734,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:52.499,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20518.0>
[rebalance:debug,2014-08-19T16:49:52.499,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.500,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30930.0>,#Ref<16550.0.1.57288>}]}
[rebalance:info,2014-08-19T16:49:52.500,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 734
[rebalance:debug,2014-08-19T16:49:52.500,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30930.0>,#Ref<16550.0.1.57288>}]
[ns_server:debug,2014-08-19T16:49:52.501,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20519.0> (ok)
[ns_server:debug,2014-08-19T16:49:52.501,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.503,ns_1@10.242.238.90:<0.20520.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 734
[ns_server:debug,2014-08-19T16:49:52.549,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 740. Nacking mccouch update.
[views:debug,2014-08-19T16:49:52.549,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/740. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:52.549,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",740,pending,0}
[ns_server:debug,2014-08-19T16:49:52.549,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,740,1019,1003,759,743,1022,1006,762,746,
1009,996,765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,992,761,
745,1008,995,764,748,1011,998,767,751,1014,754,1017,1001,757,741,1020,1004,
760,744,1023,1007,994,763,747,1010]
[ns_server:info,2014-08-19T16:49:52.578,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 988 state to replica
[ns_server:info,2014-08-19T16:49:52.582,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 988 to state replica
[ns_server:debug,2014-08-19T16:49:52.616,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_988_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.617,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[988]},
{checkpoints,[{988,0}]},
{name,<<"replication_building_988_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[988]},
{takeover,false},
{suffix,"building_988_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",988,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:52.618,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20524.0>
[rebalance:debug,2014-08-19T16:49:52.618,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.619,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.30987.0>,#Ref<16550.0.1.58507>}]}
[rebalance:info,2014-08-19T16:49:52.619,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 988
[rebalance:debug,2014-08-19T16:49:52.619,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.30987.0>,#Ref<16550.0.1.58507>}]
[ns_server:debug,2014-08-19T16:49:52.621,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:52.624,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/740. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:52.624,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",740,pending,0}
[rebalance:debug,2014-08-19T16:49:52.639,ns_1@10.242.238.90:<0.20525.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 988
[ns_server:info,2014-08-19T16:49:52.646,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 733 state to replica
[ns_server:info,2014-08-19T16:49:52.652,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 733 to state replica
[ns_server:debug,2014-08-19T16:49:52.701,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_733_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.702,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[733]},
{checkpoints,[{733,0}]},
{name,<<"replication_building_733_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[733]},
{takeover,false},
{suffix,"building_733_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",733,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:52.703,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20543.0>
[rebalance:debug,2014-08-19T16:49:52.703,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.703,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31007.0>,#Ref<16550.0.1.58645>}]}
[rebalance:info,2014-08-19T16:49:52.704,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 733
[rebalance:debug,2014-08-19T16:49:52.704,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31007.0>,#Ref<16550.0.1.58645>}]
[ns_server:debug,2014-08-19T16:49:52.704,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20544.0> (ok)
[ns_server:debug,2014-08-19T16:49:52.705,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.706,ns_1@10.242.238.90:<0.20545.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 733
[ns_server:info,2014-08-19T16:49:52.779,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 987 state to replica
[ns_server:info,2014-08-19T16:49:52.782,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 987 to state replica
[ns_server:debug,2014-08-19T16:49:52.783,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 738. Nacking mccouch update.
[views:debug,2014-08-19T16:49:52.783,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/738. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:52.783,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",738,pending,0}
[ns_server:debug,2014-08-19T16:49:52.783,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,740,1019,1003,759,743,1022,1006,762,746,
1009,996,765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,992,761,
745,1008,995,764,748,1011,998,767,751,1014,754,738,1017,1001,757,741,1020,
1004,760,744,1023,1007,994,763,747,1010]
[ns_server:debug,2014-08-19T16:49:52.817,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_987_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.818,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[987]},
{checkpoints,[{987,0}]},
{name,<<"replication_building_987_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[987]},
{takeover,false},
{suffix,"building_987_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",987,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:52.819,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20550.0>
[rebalance:debug,2014-08-19T16:49:52.819,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.820,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31064.0>,#Ref<16550.0.1.58908>}]}
[rebalance:info,2014-08-19T16:49:52.820,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 987
[rebalance:debug,2014-08-19T16:49:52.820,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31064.0>,#Ref<16550.0.1.58908>}]
[ns_server:debug,2014-08-19T16:49:52.821,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.841,ns_1@10.242.238.90:<0.20551.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 987
[ns_server:info,2014-08-19T16:49:52.847,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 732 state to replica
[ns_server:info,2014-08-19T16:49:52.853,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 732 to state replica
[views:debug,2014-08-19T16:49:52.867,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/738. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:52.867,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",738,pending,0}
[ns_server:debug,2014-08-19T16:49:52.907,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_732_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:52.909,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[732]},
{checkpoints,[{732,0}]},
{name,<<"replication_building_732_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[732]},
{takeover,false},
{suffix,"building_732_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",732,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:52.909,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20555.0>
[rebalance:debug,2014-08-19T16:49:52.909,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:52.910,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31098.0>,#Ref<16550.0.1.59074>}]}
[rebalance:info,2014-08-19T16:49:52.910,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 732
[rebalance:debug,2014-08-19T16:49:52.910,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31098.0>,#Ref<16550.0.1.59074>}]
[ns_server:debug,2014-08-19T16:49:52.911,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20556.0> (ok)
[ns_server:debug,2014-08-19T16:49:52.912,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:52.912,ns_1@10.242.238.90:<0.20557.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 732
[ns_server:info,2014-08-19T16:49:52.993,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 986 state to replica
[ns_server:info,2014-08-19T16:49:52.997,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 986 to state replica
[ns_server:debug,2014-08-19T16:49:53.009,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 993. Nacking mccouch update.
[views:debug,2014-08-19T16:49:53.009,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/993. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.009,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",993,replica,0}
[ns_server:debug,2014-08-19T16:49:53.009,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,740,1019,1003,759,743,1022,1006,993,762,
746,1009,996,765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,992,
761,745,1008,995,764,748,1011,998,767,751,1014,754,738,1017,1001,757,741,
1020,1004,760,744,1023,1007,994,763,747,1010]
[ns_server:debug,2014-08-19T16:49:53.032,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_986_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.034,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[986]},
{checkpoints,[{986,0}]},
{name,<<"replication_building_986_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[986]},
{takeover,false},
{suffix,"building_986_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",986,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:53.035,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20575.0>
[rebalance:debug,2014-08-19T16:49:53.035,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.035,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31199.0>,#Ref<16550.0.1.59936>}]}
[rebalance:info,2014-08-19T16:49:53.036,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 986
[rebalance:debug,2014-08-19T16:49:53.036,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31199.0>,#Ref<16550.0.1.59936>}]
[ns_server:debug,2014-08-19T16:49:53.037,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:53.055,ns_1@10.242.238.90:<0.20576.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 986
[ns_server:info,2014-08-19T16:49:53.062,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 731 state to replica
[ns_server:info,2014-08-19T16:49:53.069,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 731 to state replica
[views:debug,2014-08-19T16:49:53.092,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/993. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.093,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",993,replica,0}
[ns_server:debug,2014-08-19T16:49:53.121,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_731_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.123,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[731]},
{checkpoints,[{731,0}]},
{name,<<"replication_building_731_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[731]},
{takeover,false},
{suffix,"building_731_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",731,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:53.123,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20580.0>
[rebalance:debug,2014-08-19T16:49:53.123,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.124,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31219.0>,#Ref<16550.0.1.60050>}]}
[rebalance:info,2014-08-19T16:49:53.124,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 731
[rebalance:debug,2014-08-19T16:49:53.124,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31219.0>,#Ref<16550.0.1.60050>}]
[ns_server:debug,2014-08-19T16:49:53.125,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20581.0> (ok)
[ns_server:debug,2014-08-19T16:49:53.125,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:53.127,ns_1@10.242.238.90:<0.20582.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 731
[ns_server:info,2014-08-19T16:49:53.203,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 985 state to replica
[ns_server:info,2014-08-19T16:49:53.208,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 985 to state replica
[ns_server:debug,2014-08-19T16:49:53.242,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_985_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.244,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[985]},
{checkpoints,[{985,0}]},
{name,<<"replication_building_985_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[985]},
{takeover,false},
{suffix,"building_985_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",985,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:53.244,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20600.0>
[rebalance:debug,2014-08-19T16:49:53.244,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.245,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31281.0>,#Ref<16550.0.1.60333>}]}
[rebalance:info,2014-08-19T16:49:53.245,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 985
[rebalance:debug,2014-08-19T16:49:53.245,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31281.0>,#Ref<16550.0.1.60333>}]
[ns_server:debug,2014-08-19T16:49:53.246,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:53.259,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 991. Nacking mccouch update.
[views:debug,2014-08-19T16:49:53.259,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/991. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.260,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",991,replica,0}
[ns_server:debug,2014-08-19T16:49:53.260,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,740,1019,1003,759,743,1022,1006,993,762,
746,1009,996,765,749,1012,999,752,1015,755,1018,1002,758,742,1021,1005,992,
761,745,1008,995,764,748,1011,998,767,751,1014,754,738,1017,1001,757,741,
1020,1004,991,760,744,1023,1007,994,763,747,1010]
[rebalance:debug,2014-08-19T16:49:53.266,ns_1@10.242.238.90:<0.20601.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 985
[ns_server:info,2014-08-19T16:49:53.272,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 730 state to replica
[ns_server:info,2014-08-19T16:49:53.278,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 730 to state replica
[ns_server:debug,2014-08-19T16:49:53.332,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_730_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.334,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[730]},
{checkpoints,[{730,0}]},
{name,<<"replication_building_730_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[730]},
{takeover,false},
{suffix,"building_730_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",730,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:53.334,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20605.0>
[rebalance:debug,2014-08-19T16:49:53.335,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.335,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31301.0>,#Ref<16550.0.1.60448>}]}
[rebalance:info,2014-08-19T16:49:53.335,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 730
[rebalance:debug,2014-08-19T16:49:53.336,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31301.0>,#Ref<16550.0.1.60448>}]
[ns_server:debug,2014-08-19T16:49:53.336,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:53.337,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20608.0> (ok)
[rebalance:debug,2014-08-19T16:49:53.338,ns_1@10.242.238.90:<0.20610.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 730
[views:debug,2014-08-19T16:49:53.342,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/991. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.342,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",991,replica,0}
[ns_server:info,2014-08-19T16:49:53.414,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 984 state to replica
[ns_server:info,2014-08-19T16:49:53.418,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 984 to state replica
[ns_server:debug,2014-08-19T16:49:53.435,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 989. Nacking mccouch update.
[views:debug,2014-08-19T16:49:53.435,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/989. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.435,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",989,replica,0}
[ns_server:debug,2014-08-19T16:49:53.435,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,756,740,1019,1003,759,743,1022,1006,993,762,
746,1009,996,765,749,1012,999,752,1015,755,1018,1002,989,758,742,1021,1005,
992,761,745,1008,995,764,748,1011,998,767,751,1014,754,738,1017,1001,757,741,
1020,1004,991,760,744,1023,1007,994,763,747,1010]
[ns_server:debug,2014-08-19T16:49:53.453,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_984_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.455,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[984]},
{checkpoints,[{984,0}]},
{name,<<"replication_building_984_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[984]},
{takeover,false},
{suffix,"building_984_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",984,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:53.455,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20641.0>
[rebalance:debug,2014-08-19T16:49:53.456,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.456,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31359.0>,#Ref<16550.0.1.60773>}]}
[rebalance:info,2014-08-19T16:49:53.456,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 984
[rebalance:debug,2014-08-19T16:49:53.456,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31359.0>,#Ref<16550.0.1.60773>}]
[ns_server:debug,2014-08-19T16:49:53.458,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:53.468,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/989. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.469,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",989,replica,0}
[rebalance:debug,2014-08-19T16:49:53.481,ns_1@10.242.238.90:<0.20642.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 984
[ns_server:info,2014-08-19T16:49:53.487,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 729 state to replica
[ns_server:info,2014-08-19T16:49:53.492,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 729 to state replica
[ns_server:debug,2014-08-19T16:49:53.543,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_729_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.544,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[729]},
{checkpoints,[{729,0}]},
{name,<<"replication_building_729_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[729]},
{takeover,false},
{suffix,"building_729_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",729,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:53.544,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20660.0>
[rebalance:debug,2014-08-19T16:49:53.545,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.545,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31379.0>,#Ref<16550.0.1.60894>}]}
[rebalance:info,2014-08-19T16:49:53.545,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 729
[rebalance:debug,2014-08-19T16:49:53.546,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31379.0>,#Ref<16550.0.1.60894>}]
[ns_server:debug,2014-08-19T16:49:53.546,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20661.0> (ok)
[ns_server:debug,2014-08-19T16:49:53.547,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:53.548,ns_1@10.242.238.90:<0.20662.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 729
[ns_server:debug,2014-08-19T16:49:53.577,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 987. Nacking mccouch update.
[views:debug,2014-08-19T16:49:53.577,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/987. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.578,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",987,replica,0}
[ns_server:debug,2014-08-19T16:49:53.578,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,987,756,740,1019,1003,759,743,1022,1006,993,
762,746,1009,996,765,749,1012,999,752,1015,755,1018,1002,989,758,742,1021,
1005,992,761,745,1008,995,764,748,1011,998,767,751,1014,754,738,1017,1001,
757,741,1020,1004,991,760,744,1023,1007,994,763,747,1010]
[ns_server:info,2014-08-19T16:49:53.622,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 983 state to replica
[ns_server:info,2014-08-19T16:49:53.627,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 983 to state replica
[views:debug,2014-08-19T16:49:53.636,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/987. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:53.637,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",987,replica,0}
[ns_server:debug,2014-08-19T16:49:53.661,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_983_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.662,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[983]},
{checkpoints,[{983,0}]},
{name,<<"replication_building_983_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[983]},
{takeover,false},
{suffix,"building_983_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",983,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:53.663,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20666.0>
[rebalance:debug,2014-08-19T16:49:53.663,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.664,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31436.0>,#Ref<16550.0.1.61141>}]}
[rebalance:info,2014-08-19T16:49:53.664,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 983
[rebalance:debug,2014-08-19T16:49:53.664,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31436.0>,#Ref<16550.0.1.61141>}]
[ns_server:debug,2014-08-19T16:49:53.665,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:53.685,ns_1@10.242.238.90:<0.20667.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 983
[ns_server:info,2014-08-19T16:49:53.690,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 728 state to replica
[ns_server:info,2014-08-19T16:49:53.697,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 728 to state replica
[ns_server:debug,2014-08-19T16:49:53.720,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 739. Nacking mccouch update.
[views:debug,2014-08-19T16:49:53.720,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/739. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:53.720,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",739,pending,0}
[ns_server:debug,2014-08-19T16:49:53.720,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,1016,1000,987,756,740,1019,1003,759,743,1022,1006,993,
762,746,1009,996,765,749,1012,999,752,1015,755,739,1018,1002,989,758,742,
1021,1005,992,761,745,1008,995,764,748,1011,998,767,751,1014,754,738,1017,
1001,757,741,1020,1004,991,760,744,1023,1007,994,763,747,1010]
[ns_server:debug,2014-08-19T16:49:53.746,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_728_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.748,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[728]},
{checkpoints,[{728,0}]},
{name,<<"replication_building_728_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[728]},
{takeover,false},
{suffix,"building_728_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",728,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:53.749,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20685.0>
[rebalance:debug,2014-08-19T16:49:53.749,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.749,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31456.0>,#Ref<16550.0.1.61280>}]}
[rebalance:info,2014-08-19T16:49:53.749,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 728
[rebalance:debug,2014-08-19T16:49:53.750,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31456.0>,#Ref<16550.0.1.61280>}]
[ns_server:debug,2014-08-19T16:49:53.750,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:49:53.751,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20686.0> (ok)
[rebalance:debug,2014-08-19T16:49:53.752,ns_1@10.242.238.90:<0.20687.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 728
[views:debug,2014-08-19T16:49:53.754,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/739. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:53.754,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",739,pending,0}
[ns_server:info,2014-08-19T16:49:53.826,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 982 state to replica
[ns_server:info,2014-08-19T16:49:53.830,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 982 to state replica
[ns_server:debug,2014-08-19T16:49:53.837,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 737. Nacking mccouch update.
[views:debug,2014-08-19T16:49:53.837,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/737. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:53.838,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",737,pending,0}
[ns_server:debug,2014-08-19T16:49:53.838,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,737,1016,1000,987,756,740,1019,1003,759,743,1022,1006,
993,762,746,1009,996,765,749,1012,999,752,1015,755,739,1018,1002,989,758,742,
1021,1005,992,761,745,1008,995,764,748,1011,998,767,751,1014,754,738,1017,
1001,757,741,1020,1004,991,760,744,1023,1007,994,763,747,1010]
[ns_server:debug,2014-08-19T16:49:53.864,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_982_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.866,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[982]},
{checkpoints,[{982,0}]},
{name,<<"replication_building_982_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[982]},
{takeover,false},
{suffix,"building_982_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",982,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:53.866,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20705.0>
[rebalance:debug,2014-08-19T16:49:53.866,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.867,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31513.0>,#Ref<16550.0.1.61543>}]}
[rebalance:info,2014-08-19T16:49:53.867,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 982
[rebalance:debug,2014-08-19T16:49:53.868,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31513.0>,#Ref<16550.0.1.61543>}]
[ns_server:debug,2014-08-19T16:49:53.869,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:49:53.871,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/737. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:53.871,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",737,pending,0}
[rebalance:debug,2014-08-19T16:49:53.887,ns_1@10.242.238.90:<0.20706.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 982
[ns_server:info,2014-08-19T16:49:53.893,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 727 state to replica
[ns_server:info,2014-08-19T16:49:53.900,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 727 to state replica
[ns_server:debug,2014-08-19T16:49:53.949,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_727_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:53.951,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[727]},
{checkpoints,[{727,0}]},
{name,<<"replication_building_727_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[727]},
{takeover,false},
{suffix,"building_727_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",727,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:53.952,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20710.0>
[rebalance:debug,2014-08-19T16:49:53.952,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:53.953,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31533.0>,#Ref<16550.0.1.61661>}]}
[rebalance:info,2014-08-19T16:49:53.953,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 727
[rebalance:debug,2014-08-19T16:49:53.953,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31533.0>,#Ref<16550.0.1.61661>}]
[ns_server:debug,2014-08-19T16:49:53.954,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20717.0> (ok)
[ns_server:debug,2014-08-19T16:49:53.954,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:53.955,ns_1@10.242.238.90:<0.20725.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 727
[ns_server:debug,2014-08-19T16:49:54.028,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 735. Nacking mccouch update.
[views:debug,2014-08-19T16:49:54.028,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/735. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:54.028,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",735,pending,0}
[ns_server:debug,2014-08-19T16:49:54.028,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,737,1016,1000,987,756,740,1019,1003,759,743,1022,1006,
993,762,746,1009,996,765,749,1012,999,752,1015,755,739,1018,1002,989,758,742,
1021,1005,992,761,745,1008,995,764,748,1011,998,767,751,735,1014,754,738,
1017,1001,757,741,1020,1004,991,760,744,1023,1007,994,763,747,1010]
[ns_server:info,2014-08-19T16:49:54.035,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 981 state to replica
[ns_server:info,2014-08-19T16:49:54.041,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 981 to state replica
[ns_server:debug,2014-08-19T16:49:54.074,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_981_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:54.076,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[981]},
{checkpoints,[{981,0}]},
{name,<<"replication_building_981_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[981]},
{takeover,false},
{suffix,"building_981_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",981,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:54.077,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20730.0>
[rebalance:debug,2014-08-19T16:49:54.077,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:54.078,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31590.0>,#Ref<16550.0.1.61948>}]}
[rebalance:info,2014-08-19T16:49:54.078,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 981
[rebalance:debug,2014-08-19T16:49:54.078,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31590.0>,#Ref<16550.0.1.61948>}]
[views:debug,2014-08-19T16:49:54.078,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/735. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:54.079,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",735,pending,0}
[ns_server:debug,2014-08-19T16:49:54.080,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:54.098,ns_1@10.242.238.90:<0.20731.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 981
[ns_server:info,2014-08-19T16:49:54.104,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 726 state to replica
[ns_server:info,2014-08-19T16:49:54.109,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 726 to state replica
[ns_server:debug,2014-08-19T16:49:54.159,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_726_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:54.160,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[726]},
{checkpoints,[{726,0}]},
{name,<<"replication_building_726_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[726]},
{takeover,false},
{suffix,"building_726_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",726,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:49:54.161,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20749.0>
[rebalance:debug,2014-08-19T16:49:54.161,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:54.162,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31624.0>,#Ref<16550.0.1.62108>}]}
[rebalance:info,2014-08-19T16:49:54.162,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 726
[rebalance:debug,2014-08-19T16:49:54.162,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31624.0>,#Ref<16550.0.1.62108>}]
[ns_server:debug,2014-08-19T16:49:54.163,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20750.0> (ok)
[ns_server:debug,2014-08-19T16:49:54.163,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:54.164,ns_1@10.242.238.90:<0.20751.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 726
[ns_server:debug,2014-08-19T16:49:54.212,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 733. Nacking mccouch update.
[views:debug,2014-08-19T16:49:54.212,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/733. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:54.212,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",733,pending,0}
[ns_server:debug,2014-08-19T16:49:54.212,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,737,1016,1000,987,756,740,1019,1003,759,743,1022,1006,
993,762,746,1009,996,765,749,733,1012,999,752,1015,755,739,1018,1002,989,758,
742,1021,1005,992,761,745,1008,995,764,748,1011,998,767,751,735,1014,754,738,
1017,1001,757,741,1020,1004,991,760,744,1023,1007,994,763,747,1010]
[ns_server:info,2014-08-19T16:49:54.237,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 980 state to replica
[ns_server:info,2014-08-19T16:49:54.242,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 980 to state replica
[ns_server:debug,2014-08-19T16:49:54.276,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_980_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:49:54.278,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[980]},
{checkpoints,[{980,0}]},
{name,<<"replication_building_980_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[980]},
{takeover,false},
{suffix,"building_980_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",980,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:49:54.278,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.20755.0>
[rebalance:debug,2014-08-19T16:49:54.278,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:49:54.279,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.31680.0>,#Ref<16550.0.1.62377>}]}
[rebalance:info,2014-08-19T16:49:54.279,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 980
[views:debug,2014-08-19T16:49:54.279,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/733. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:54.279,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",733,pending,0}
[rebalance:debug,2014-08-19T16:49:54.280,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.31680.0>,#Ref<16550.0.1.62377>}]
[ns_server:debug,2014-08-19T16:49:54.281,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:49:54.301,ns_1@10.242.238.90:<0.20756.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 980
[ns_server:debug,2014-08-19T16:49:54.404,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 990. Nacking mccouch update.
[views:debug,2014-08-19T16:49:54.405,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/990. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.405,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",990,replica,0}
[ns_server:debug,2014-08-19T16:49:54.405,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,
1006,993,762,746,1009,996,765,749,733,1012,999,752,1015,755,739,1018,1002,
989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,767,751,735,1014,
754,738,1017,1001,757,741,1020,1004,991,760,744,1023,1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:54.455,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/990. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.455,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",990,replica,0}
[ns_server:debug,2014-08-19T16:49:54.530,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 988. Nacking mccouch update.
[views:debug,2014-08-19T16:49:54.530,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/988. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.531,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",988,replica,0}
[ns_server:debug,2014-08-19T16:49:54.531,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,
1006,993,762,746,1009,996,765,749,733,1012,999,752,1015,755,739,1018,1002,
989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,767,751,735,1014,
754,738,1017,1001,988,757,741,1020,1004,991,760,744,1023,1007,994,763,747,
1010]
[views:debug,2014-08-19T16:49:54.564,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/988. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.565,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",988,replica,0}
[ns_server:debug,2014-08-19T16:49:54.696,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 986. Nacking mccouch update.
[views:debug,2014-08-19T16:49:54.696,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/986. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.697,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",986,replica,0}
[ns_server:debug,2014-08-19T16:49:54.697,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,
1006,993,762,746,1009,996,765,749,733,1012,999,752,1015,986,755,739,1018,
1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,767,751,735,
1014,754,738,1017,1001,988,757,741,1020,1004,991,760,744,1023,1007,994,763,
747,1010]
[views:debug,2014-08-19T16:49:54.763,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/986. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.764,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",986,replica,0}
[ns_server:debug,2014-08-19T16:49:54.914,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 984. Nacking mccouch update.
[views:debug,2014-08-19T16:49:54.914,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/984. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.914,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",984,replica,0}
[ns_server:debug,2014-08-19T16:49:54.914,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,1009,996,765,749,733,1012,999,752,1015,986,755,739,
1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,767,
751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,760,744,1023,1007,
994,763,747,1010]
[views:debug,2014-08-19T16:49:54.964,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/984. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:54.964,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",984,replica,0}
[ns_server:debug,2014-08-19T16:49:55.115,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 982. Nacking mccouch update.
[views:debug,2014-08-19T16:49:55.115,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/982. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:55.115,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",982,replica,0}
[ns_server:debug,2014-08-19T16:49:55.115,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,1009,996,765,749,733,1012,999,752,1015,986,755,739,
1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,982,
767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,760,744,1023,
1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:55.166,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/982. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:55.166,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",982,replica,0}
[ns_server:debug,2014-08-19T16:49:55.307,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 980. Nacking mccouch update.
[views:debug,2014-08-19T16:49:55.307,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/980. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:55.307,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",980,replica,0}
[ns_server:debug,2014-08-19T16:49:55.307,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,1009,996,980,765,749,733,1012,999,752,1015,986,755,739,
1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,982,
767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,760,744,1023,
1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:55.375,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/980. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:55.375,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",980,replica,0}
[ns_server:debug,2014-08-19T16:49:55.539,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 736. Nacking mccouch update.
[views:debug,2014-08-19T16:49:55.539,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/736. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:55.539,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",736,pending,0}
[ns_server:debug,2014-08-19T16:49:55.540,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,1009,996,980,765,749,733,1012,999,752,736,1015,986,755,
739,1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,
982,767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,760,744,
1023,1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:55.573,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/736. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:55.573,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",736,pending,0}
[ns_server:debug,2014-08-19T16:49:55.674,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 734. Nacking mccouch update.
[views:debug,2014-08-19T16:49:55.674,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/734. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:55.674,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",734,pending,0}
[ns_server:debug,2014-08-19T16:49:55.674,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,734,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,1009,996,980,765,749,733,1012,999,752,736,1015,986,755,
739,1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,1011,998,
982,767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,760,744,
1023,1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:55.733,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/734. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:55.733,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",734,pending,0}
[ns_server:debug,2014-08-19T16:49:55.816,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 732. Nacking mccouch update.
[views:debug,2014-08-19T16:49:55.816,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/732. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:55.816,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",732,pending,0}
[ns_server:debug,2014-08-19T16:49:55.816,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,734,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,1009,996,980,765,749,733,1012,999,752,736,1015,986,755,
739,1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,732,1011,
998,982,767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,760,744,
1023,1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:55.876,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/732. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:55.876,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",732,pending,0}
[ns_server:debug,2014-08-19T16:49:56.042,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 730. Nacking mccouch update.
[views:debug,2014-08-19T16:49:56.042,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/730. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:56.042,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",730,pending,0}
[ns_server:debug,2014-08-19T16:49:56.042,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,734,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,730,1009,996,980,765,749,733,1012,999,752,736,1015,986,
755,739,1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,732,
1011,998,982,767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,
760,744,1023,1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:56.100,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/730. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:56.101,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",730,pending,0}
[ns_server:debug,2014-08-19T16:49:56.176,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 728. Nacking mccouch update.
[views:debug,2014-08-19T16:49:56.176,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/728. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:56.176,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",728,pending,0}
[ns_server:debug,2014-08-19T16:49:56.176,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,734,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,730,1009,996,980,765,749,733,1012,999,752,736,1015,986,
755,739,1018,1002,989,758,742,1021,1005,992,761,745,1008,995,764,748,732,
1011,998,982,767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,
760,744,728,1023,1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:56.210,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/728. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:56.210,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",728,pending,0}
[ns_server:debug,2014-08-19T16:49:56.367,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 726. Nacking mccouch update.
[views:debug,2014-08-19T16:49:56.367,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/726. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:56.367,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",726,pending,0}
[ns_server:debug,2014-08-19T16:49:56.367,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,766,750,734,1013,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,
1022,1006,993,762,746,730,1009,996,980,765,749,733,1012,999,752,736,1015,986,
755,739,1018,1002,989,758,742,726,1021,1005,992,761,745,1008,995,764,748,732,
1011,998,982,767,751,735,1014,754,738,1017,1001,988,757,741,1020,1004,991,
760,744,728,1023,1007,994,763,747,1010]
[views:debug,2014-08-19T16:49:56.451,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/726. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:56.451,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",726,pending,0}
[rebalance:debug,2014-08-19T16:49:56.452,ns_1@10.242.238.90:<0.20165.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:56.452,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20165.0> (ok)
[ns_server:debug,2014-08-19T16:49:56.636,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 985. Nacking mccouch update.
[views:debug,2014-08-19T16:49:56.636,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/985. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:56.636,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",985,replica,0}
[ns_server:debug,2014-08-19T16:49:56.636,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,1006,
993,762,746,730,1009,996,980,765,749,733,1012,999,752,736,1015,986,755,739,
1018,1002,989,758,742,726,1021,1005,992,761,745,1008,995,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,1020,1004,991,760,
744,728,1023,1007,994,763,747,1010,766,734,1013]
[views:debug,2014-08-19T16:49:56.670,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/985. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:56.671,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",985,replica,0}
[ns_server:debug,2014-08-19T16:49:56.753,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 983. Nacking mccouch update.
[views:debug,2014-08-19T16:49:56.754,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/983. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:56.754,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",983,replica,0}
[ns_server:debug,2014-08-19T16:49:56.754,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,1006,
993,762,746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,
739,1018,1002,989,758,742,726,1021,1005,992,761,745,1008,995,764,748,732,
1011,998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,1020,1004,
991,760,744,728,1023,1007,994,763,747,1010,766,734,1013]
[views:debug,2014-08-19T16:49:56.804,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/983. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:56.804,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",983,replica,0}
[ns_server:debug,2014-08-19T16:49:56.888,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 981. Nacking mccouch update.
[views:debug,2014-08-19T16:49:56.888,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/981. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:56.888,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",981,replica,0}
[ns_server:debug,2014-08-19T16:49:56.888,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,1006,
993,762,746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,
739,1018,1002,989,758,742,726,1021,1005,992,761,745,1008,995,764,748,732,
1011,998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,1020,1004,
991,760,744,728,1023,1007,994,763,747,1010,981,766,734,1013]
[views:debug,2014-08-19T16:49:56.938,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/981. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:49:56.938,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",981,replica,0}
[ns_server:debug,2014-08-19T16:49:57.093,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 731. Nacking mccouch update.
[views:debug,2014-08-19T16:49:57.093,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/731. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:57.093,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",731,pending,0}
[ns_server:debug,2014-08-19T16:49:57.094,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,1006,
993,762,746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,
739,1018,1002,989,758,742,726,1021,1005,992,761,745,1008,995,764,748,732,
1011,998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,1020,1004,
991,760,744,728,1023,1007,994,763,747,731,1010,981,766,734,1013]
[views:debug,2014-08-19T16:49:57.127,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/731. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:57.127,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",731,pending,0}
[ns_server:debug,2014-08-19T16:49:57.236,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 729. Nacking mccouch update.
[views:debug,2014-08-19T16:49:57.236,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/729. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:57.236,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",729,pending,0}
[ns_server:debug,2014-08-19T16:49:57.236,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,753,737,1016,1000,987,756,740,1019,1003,990,759,743,1022,1006,
993,762,746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,
739,1018,1002,989,758,742,726,1021,1005,992,761,745,729,1008,995,764,748,732,
1011,998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,1020,1004,
991,760,744,728,1023,1007,994,763,747,731,1010,981,766,734,1013]
[views:debug,2014-08-19T16:49:57.296,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/729. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:57.296,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",729,pending,0}
[ns_server:debug,2014-08-19T16:49:57.388,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 727. Nacking mccouch update.
[views:debug,2014-08-19T16:49:57.388,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/727. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:57.388,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",727,pending,0}
[ns_server:debug,2014-08-19T16:49:57.388,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,987,756,740,1019,1003,990,759,743,727,1022,1006,993,762,
746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,1018,
1002,989,758,742,726,1021,1005,992,761,745,729,1008,995,764,748,732,1011,998,
982,767,751,735,1014,985,754,738,1017,1001,988,757,741,1020,1004,991,760,744,
728,1023,1007,994,763,747,731,1010,981,766,734,1013,753,1000]
[views:debug,2014-08-19T16:49:57.438,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/727. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:49:57.438,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",727,pending,0}
[rebalance:debug,2014-08-19T16:49:57.441,ns_1@10.242.238.90:<0.20751.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.442,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20751.0> (ok)
[rebalance:debug,2014-08-19T16:49:57.472,ns_1@10.242.238.90:<0.20687.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.472,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20687.0> (ok)
[rebalance:debug,2014-08-19T16:49:57.506,ns_1@10.242.238.90:<0.20610.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:57.506,ns_1@10.242.238.90:<0.20725.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.506,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20610.0> (ok)
[ns_server:debug,2014-08-19T16:49:57.506,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20725.0> (ok)
[rebalance:debug,2014-08-19T16:49:57.590,ns_1@10.242.238.90:<0.20662.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:57.590,ns_1@10.242.238.90:<0.20557.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.590,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20662.0> (ok)
[ns_server:debug,2014-08-19T16:49:57.590,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20557.0> (ok)
[rebalance:debug,2014-08-19T16:49:57.697,ns_1@10.242.238.90:<0.20582.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:57.697,ns_1@10.242.238.90:<0.20520.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.697,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20582.0> (ok)
[ns_server:debug,2014-08-19T16:49:57.697,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20520.0> (ok)
[rebalance:debug,2014-08-19T16:49:57.831,ns_1@10.242.238.90:<0.20456.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.831,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20456.0> (ok)
[rebalance:debug,2014-08-19T16:49:57.831,ns_1@10.242.238.90:<0.20545.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.831,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20545.0> (ok)
[rebalance:debug,2014-08-19T16:49:57.931,ns_1@10.242.238.90:<0.20392.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:57.931,ns_1@10.242.238.90:<0.20495.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:57.932,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20392.0> (ok)
[ns_server:debug,2014-08-19T16:49:57.932,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20495.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.032,ns_1@10.242.238.90:<0.20335.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:58.032,ns_1@10.242.238.90:<0.20417.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.032,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20335.0> (ok)
[ns_server:debug,2014-08-19T16:49:58.032,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20417.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.132,ns_1@10.242.238.90:<0.20285.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.133,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20285.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.133,ns_1@10.242.238.90:<0.20367.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.133,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20367.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.233,ns_1@10.242.238.90:<0.20221.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:58.233,ns_1@10.242.238.90:<0.20310.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.233,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20221.0> (ok)
[ns_server:debug,2014-08-19T16:49:58.233,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20310.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.333,ns_1@10.242.238.90:<0.20756.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:58.333,ns_1@10.242.238.90:<0.20260.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.333,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20756.0> (ok)
[ns_server:debug,2014-08-19T16:49:58.333,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20260.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.459,ns_1@10.242.238.90:<0.20706.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.459,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20706.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.459,ns_1@10.242.238.90:<0.20196.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.459,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20196.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.599,ns_1@10.242.238.90:<0.20642.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.599,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20642.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.599,ns_1@10.242.238.90:<0.20731.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.599,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20731.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.716,ns_1@10.242.238.90:<0.20576.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.716,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20576.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.716,ns_1@10.242.238.90:<0.20667.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.716,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20667.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.841,ns_1@10.242.238.90:<0.20601.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:58.841,ns_1@10.242.238.90:<0.20525.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.841,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20601.0> (ok)
[ns_server:debug,2014-08-19T16:49:58.841,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20525.0> (ok)
[rebalance:debug,2014-08-19T16:49:58.863,ns_1@10.242.238.90:<0.21036.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 743
[rebalance:debug,2014-08-19T16:49:58.950,ns_1@10.242.238.90:<0.20551.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:58.950,ns_1@10.242.238.90:<0.20475.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:58.950,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20551.0> (ok)
[ns_server:debug,2014-08-19T16:49:58.950,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20475.0> (ok)
[rebalance:debug,2014-08-19T16:49:59.042,ns_1@10.242.238.90:<0.20411.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:59.042,ns_1@10.242.238.90:<0.20500.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:59.042,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20411.0> (ok)
[ns_server:debug,2014-08-19T16:49:59.042,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20500.0> (ok)
[rebalance:debug,2014-08-19T16:49:59.159,ns_1@10.242.238.90:<0.20436.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:59.159,ns_1@10.242.238.90:<0.20360.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:59.159,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20436.0> (ok)
[ns_server:debug,2014-08-19T16:49:59.159,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20360.0> (ok)
[rebalance:debug,2014-08-19T16:49:59.284,ns_1@10.242.238.90:<0.20386.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:59.284,ns_1@10.242.238.90:<0.20304.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:59.285,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20386.0> (ok)
[ns_server:debug,2014-08-19T16:49:59.285,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20304.0> (ok)
[rebalance:debug,2014-08-19T16:49:59.410,ns_1@10.242.238.90:<0.20240.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:49:59.410,ns_1@10.242.238.90:<0.20329.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:59.410,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20240.0> (ok)
[ns_server:debug,2014-08-19T16:49:59.410,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20329.0> (ok)
[rebalance:debug,2014-08-19T16:49:59.551,ns_1@10.242.238.90:<0.20270.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:59.551,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.20270.0> (ok)
[rebalance:debug,2014-08-19T16:49:59.601,ns_1@10.242.238.90:<0.21036.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:49:59.601,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21036.0> (ok)
[rebalance:debug,2014-08-19T16:50:00.509,ns_1@10.242.238.90:<0.21046.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 744
[rebalance:debug,2014-08-19T16:50:00.509,ns_1@10.242.238.90:<0.21049.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 745
[rebalance:debug,2014-08-19T16:50:00.510,ns_1@10.242.238.90:<0.21046.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:00.510,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21046.0> (ok)
[rebalance:debug,2014-08-19T16:50:00.510,ns_1@10.242.238.90:<0.21049.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:00.510,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21049.0> (ok)
[rebalance:debug,2014-08-19T16:50:00.633,ns_1@10.242.238.90:<0.21052.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 746
[rebalance:debug,2014-08-19T16:50:00.634,ns_1@10.242.238.90:<0.21055.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 999
[rebalance:debug,2014-08-19T16:50:00.634,ns_1@10.242.238.90:<0.21052.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:00.635,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21052.0> (ok)
[rebalance:debug,2014-08-19T16:50:00.635,ns_1@10.242.238.90:<0.21055.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:00.635,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21055.0> (ok)
[rebalance:debug,2014-08-19T16:50:00.733,ns_1@10.242.238.90:<0.21058.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 998
[rebalance:debug,2014-08-19T16:50:00.733,ns_1@10.242.238.90:<0.21061.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1001
[rebalance:debug,2014-08-19T16:50:00.734,ns_1@10.242.238.90:<0.21058.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:00.734,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21058.0> (ok)
[rebalance:debug,2014-08-19T16:50:00.734,ns_1@10.242.238.90:<0.21061.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:00.735,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21061.0> (ok)
[rebalance:debug,2014-08-19T16:50:00.833,ns_1@10.242.238.90:<0.21064.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 1000
[rebalance:debug,2014-08-19T16:50:00.835,ns_1@10.242.238.90:<0.21064.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:00.835,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21064.0> (ok)
[rebalance:debug,2014-08-19T16:50:01.826,ns_1@10.242.238.90:<0.21073.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 727
[rebalance:debug,2014-08-19T16:50:01.827,ns_1@10.242.238.90:<0.21073.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:01.827,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21073.0> (ok)
[rebalance:debug,2014-08-19T16:50:01.935,ns_1@10.242.238.90:<0.21076.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 729
[rebalance:debug,2014-08-19T16:50:01.936,ns_1@10.242.238.90:<0.21076.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:01.936,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21076.0> (ok)
[rebalance:debug,2014-08-19T16:50:01.995,ns_1@10.242.238.90:<0.21079.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 731
[rebalance:debug,2014-08-19T16:50:01.997,ns_1@10.242.238.90:<0.21079.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:01.997,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21079.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.012,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:02.015,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.015,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3120 us
[ns_server:debug,2014-08-19T16:50:02.016,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.016,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{488,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:02.066,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:02.067,ns_1@10.242.238.90:<0.21083.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 726
[rebalance:debug,2014-08-19T16:50:02.067,ns_1@10.242.238.90:<0.21084.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 733
[rebalance:debug,2014-08-19T16:50:02.069,ns_1@10.242.238.90:<0.21083.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.069,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21083.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.069,ns_1@10.242.238.90:<0.21084.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.069,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21084.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.070,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.071,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4032 us
[ns_server:debug,2014-08-19T16:50:02.071,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.071,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{490,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:02.162,ns_1@10.242.238.90:<0.21090.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 728
[rebalance:debug,2014-08-19T16:50:02.162,ns_1@10.242.238.90:<0.21091.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 735
[rebalance:debug,2014-08-19T16:50:02.163,ns_1@10.242.238.90:<0.21090.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.163,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21090.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.164,ns_1@10.242.238.90:<0.21091.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.164,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21091.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.278,ns_1@10.242.238.90:<0.21096.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 730
[rebalance:debug,2014-08-19T16:50:02.279,ns_1@10.242.238.90:<0.21099.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 737
[rebalance:debug,2014-08-19T16:50:02.280,ns_1@10.242.238.90:<0.21096.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.280,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21096.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.280,ns_1@10.242.238.90:<0.21099.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.280,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21099.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.396,ns_1@10.242.238.90:<0.21102.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 739
[rebalance:debug,2014-08-19T16:50:02.396,ns_1@10.242.238.90:<0.21105.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 732
[rebalance:debug,2014-08-19T16:50:02.397,ns_1@10.242.238.90:<0.21102.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:02.397,ns_1@10.242.238.90:<0.21105.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.397,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21102.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.397,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21105.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.496,ns_1@10.242.238.90:<0.21108.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 734
[rebalance:debug,2014-08-19T16:50:02.497,ns_1@10.242.238.90:<0.21111.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 741
[rebalance:debug,2014-08-19T16:50:02.498,ns_1@10.242.238.90:<0.21108.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.498,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21108.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.498,ns_1@10.242.238.90:<0.21111.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.498,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21111.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.611,ns_1@10.242.238.90:<0.21115.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 743)
[ns_server:debug,2014-08-19T16:50:02.611,ns_1@10.242.238.90:<0.21115.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:02.611,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21114.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.612,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:02.612,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:02.612,ns_1@10.242.238.90:<0.21116.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:02.612,ns_1@10.242.238.90:<0.21116.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:02.612,ns_1@10.242.238.90:<0.20243.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:02.613,ns_1@10.242.238.90:<0.21117.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 736
[rebalance:debug,2014-08-19T16:50:02.614,ns_1@10.242.238.90:<0.21117.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.614,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21117.0> (ok)
[ns_server:info,2014-08-19T16:50:02.659,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 743 state to active
[ns_server:debug,2014-08-19T16:50:02.661,ns_1@10.242.238.90:<0.21121.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 745)
[ns_server:debug,2014-08-19T16:50:02.661,ns_1@10.242.238.90:<0.21121.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:02.661,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21120.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.662,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:02.662,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:02.662,ns_1@10.242.238.90:<0.21122.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:02.662,ns_1@10.242.238.90:<0.21122.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:02.662,ns_1@10.242.238.90:<0.20193.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:02.662,ns_1@10.242.238.90:<0.21123.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 738
[ns_server:debug,2014-08-19T16:50:02.683,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:02.686,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3279 us
[ns_server:debug,2014-08-19T16:50:02.686,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.687,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.687,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{743,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:02.709,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 745 state to active
[rebalance:debug,2014-08-19T16:50:02.714,ns_1@10.242.238.90:<0.21127.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 981
[rebalance:debug,2014-08-19T16:50:02.714,ns_1@10.242.238.90:<0.21130.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 740
[views:debug,2014-08-19T16:50:02.726,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/743. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:02.726,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",743,active,1}
[rebalance:debug,2014-08-19T16:50:02.727,ns_1@10.242.238.90:<0.21123.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.727,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21123.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.727,ns_1@10.242.238.90:<0.21127.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.727,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21127.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.732,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:02.733,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.734,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1523 us
[ns_server:debug,2014-08-19T16:50:02.734,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.734,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{745,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:02.760,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/745. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:02.760,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",745,active,1}
[rebalance:debug,2014-08-19T16:50:02.761,ns_1@10.242.238.90:<0.21130.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.761,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21130.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.784,ns_1@10.242.238.90:<0.21135.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 742
[rebalance:debug,2014-08-19T16:50:02.785,ns_1@10.242.238.90:<0.21138.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 983
[rebalance:debug,2014-08-19T16:50:02.785,ns_1@10.242.238.90:<0.21135.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.785,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21135.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.786,ns_1@10.242.238.90:<0.21138.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.786,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21138.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.865,ns_1@10.242.238.90:<0.21142.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 744)
[ns_server:debug,2014-08-19T16:50:02.865,ns_1@10.242.238.90:<0.21142.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:02.866,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21141.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.866,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:02.866,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:02.866,ns_1@10.242.238.90:<0.21143.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:02.867,ns_1@10.242.238.90:<0.21143.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was succesfully sent
[rebalance:info,2014-08-19T16:50:02.867,ns_1@10.242.238.90:<0.20218.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
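[editor's note] The shutdown sequences above always pair a "Sending opaque message to confirm downstream reception" line with a later "Got close ack!". A schematic of that handshake, expressed with plain Erlang message passing rather than the memcached connections the real code uses (module and message shapes are hypothetical):

-module(ack_sketch).
-export([confirm/1]).

%% Schematic only (not ns_server code): send an opaque marker to the
%% downstream process and block until the matching acknowledgement returns,
%% mirroring the opaque-message / close-ack pairs logged above.
confirm(Downstream) ->
    Opaque = make_ref(),
    Downstream ! {opaque, self(), Opaque},
    receive
        {opaque_ack, Opaque} -> ok
    after 30000 ->
        timeout
    end.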
[rebalance:debug,2014-08-19T16:50:02.867,ns_1@10.242.238.90:<0.21144.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 985
[rebalance:debug,2014-08-19T16:50:02.869,ns_1@10.242.238.90:<0.21144.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.869,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21144.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.899,ns_1@10.242.238.90:<0.21148.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 746)
[ns_server:debug,2014-08-19T16:50:02.899,ns_1@10.242.238.90:<0.21148.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:02.899,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21147.0> (ok)
[rebalance:debug,2014-08-19T16:50:02.900,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:02.900,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:02.900,ns_1@10.242.238.90:<0.21149.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:02.900,ns_1@10.242.238.90:<0.21149.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:02.900,ns_1@10.242.238.90:<0.20168.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:02.901,ns_1@10.242.238.90:<0.21150.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 987
[rebalance:debug,2014-08-19T16:50:02.902,ns_1@10.242.238.90:<0.21150.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.903,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21150.0> (ok)
[ns_server:info,2014-08-19T16:50:02.914,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 744 state to active
[ns_server:debug,2014-08-19T16:50:02.938,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:02.945,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.946,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 6390 us
[ns_server:debug,2014-08-19T16:50:02.946,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.946,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{744,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:02.949,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 746 state to active
[ns_server:debug,2014-08-19T16:50:02.968,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:02.971,ns_1@10.242.238.90:<0.21154.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 980
[rebalance:debug,2014-08-19T16:50:02.971,ns_1@10.242.238.90:<0.21155.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 989
[ns_server:debug,2014-08-19T16:50:02.974,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.974,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 5860 us
[ns_server:debug,2014-08-19T16:50:02.974,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:02.975,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{746,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:02.993,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/744. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:02.993,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",744,active,1}
[rebalance:debug,2014-08-19T16:50:02.994,ns_1@10.242.238.90:<0.21154.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:02.994,ns_1@10.242.238.90:<0.21155.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:02.994,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21154.0> (ok)
[ns_server:debug,2014-08-19T16:50:02.994,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21155.0> (ok)
[views:debug,2014-08-19T16:50:03.052,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/746. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:03.052,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",746,active,1}
[rebalance:debug,2014-08-19T16:50:03.078,ns_1@10.242.238.90:<0.21161.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 991
[rebalance:debug,2014-08-19T16:50:03.078,ns_1@10.242.238.90:<0.21164.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 982
[rebalance:debug,2014-08-19T16:50:03.079,ns_1@10.242.238.90:<0.21164.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.079,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21164.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.079,ns_1@10.242.238.90:<0.21161.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.079,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21161.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.145,ns_1@10.242.238.90:<0.21167.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 993
[rebalance:debug,2014-08-19T16:50:03.145,ns_1@10.242.238.90:<0.21170.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 984
[rebalance:debug,2014-08-19T16:50:03.146,ns_1@10.242.238.90:<0.21170.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.146,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21170.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.146,ns_1@10.242.238.90:<0.21167.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.146,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21167.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.253,ns_1@10.242.238.90:<0.21173.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 986
[rebalance:debug,2014-08-19T16:50:03.254,ns_1@10.242.238.90:<0.21176.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 995
[rebalance:debug,2014-08-19T16:50:03.254,ns_1@10.242.238.90:<0.21173.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.254,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21173.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.255,ns_1@10.242.238.90:<0.21176.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.255,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21176.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.337,ns_1@10.242.238.90:<0.21183.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 997
[rebalance:debug,2014-08-19T16:50:03.338,ns_1@10.242.238.90:<0.21186.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 988
[rebalance:debug,2014-08-19T16:50:03.339,ns_1@10.242.238.90:<0.21183.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:03.339,ns_1@10.242.238.90:<0.21186.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.339,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21183.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.339,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21186.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.430,ns_1@10.242.238.90:<0.21204.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 990
[rebalance:debug,2014-08-19T16:50:03.432,ns_1@10.242.238.90:<0.21204.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.432,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21204.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.477,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.477,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.477,ns_1@10.242.238.90:<0.21208.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.478,ns_1@10.242.238.90:<0.21208.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.478,ns_1@10.242.238.90:<0.20199.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:03.480,ns_1@10.242.238.90:<0.21209.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 992
[rebalance:debug,2014-08-19T16:50:03.481,ns_1@10.242.238.90:<0.21209.0>:janitor_agent:handle_call:795]Done
[ns_server:info,2014-08-19T16:50:03.481,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 999 state to replica
[ns_server:info,2014-08-19T16:50:03.481,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[999,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023] ([999], [])
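[editor's note] The tap_replication_manager entry above shows the replication filter for 'ns_1@10.242.238.91' growing by a single vbucket; the trailing "([999], [])" appears to be the added and removed sets. A minimal sketch, with hypothetical names, of deriving the new filter from the current one:

-module(filter_sketch).
-export([change/3]).

%% Hypothetical helper (not ns_server code): apply an add/remove delta to the
%% current vbucket filter, keeping it sorted and duplicate-free, e.g.
%% change([1002,1003], [999], []) -> [999,1002,1003].
change(Current, ToAdd, ToRemove) ->
    lists:usort(Current ++ ToAdd) -- ToRemove.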
[ns_server:debug,2014-08-19T16:50:03.482,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:handle_info:447]Starting compaction for the following buckets:
[<<"default">>]
[ns_server:debug,2014-08-19T16:50:03.482,ns_1@10.242.238.90:<0.21212.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[999,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.232944>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[999,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:03.483,ns_1@10.242.238.90:<0.21212.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.20145.0>
[ns_server:info,2014-08-19T16:50:03.483,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:03.484,ns_1@10.242.238.90:<0.21214.0>:compaction_daemon:try_to_cleanup_indexes:650]Cleaning up indexes for bucket `default`
[ns_server:info,2014-08-19T16:50:03.485,ns_1@10.242.238.90:<0.21214.0>:compaction_daemon:spawn_bucket_compactor:609]Compacting bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:info,2014-08-19T16:50:03.495,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{999,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:03.495,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:03.495,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:03.495,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:03.495,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:03.495,ns_1@10.242.238.90:<0.21217.0>:compaction_daemon:bucket_needs_compaction:1042]`default` data size is 6624, disk size is 997536
[ns_server:debug,2014-08-19T16:50:03.495,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:03.495,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:handle_info:505]Finished compaction iteration.
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.21218.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.21218.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:compaction_daemon<0.17567.0>:compaction_daemon:schedule_next_compaction:1519]Finished compaction too soon. Next run will be in 30s
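[editor's note] The compaction_daemon lines above pair a 30% fragmentation threshold with the bucket's logged data size (6624) and disk size (997536). As a rough illustration only (the exact formula and minimum-size guards used by compaction_daemon are not visible in this log), fragmentation can be read as the share of the file not occupied by live data:

-module(frag_sketch).
-export([needs_compaction/3]).

%% Rough illustration (not ns_server code): with DataSize = 6624 and
%% DiskSize = 997536 as logged above, fragmentation is
%% (997536 - 6624) * 100 / 997536, roughly 99%, well over a 30% threshold.
needs_compaction(DataSize, DiskSize, ThresholdPercent) when DiskSize > 0 ->
    Fragmentation = (DiskSize - DataSize) * 100 / DiskSize,
    Fragmentation >= ThresholdPercent.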
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.20145.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:03.496,ns_1@10.242.238.90:<0.21212.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.20145.0>
[ns_server:debug,2014-08-19T16:50:03.497,ns_1@10.242.238.90:<0.21212.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:03.497,ns_1@10.242.238.90:<0.21220.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:03.497,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.20145.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.20146.0>,<<"cut off">>,<<"cut off">>,[],70,false,false,0,
{1408,452603,495872},
completed,
{<0.21212.0>,#Ref<0.0.0.232961>},
<<"replication_ns_1@10.242.238.90">>,<0.20145.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:03.497,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21212.0>,{#Ref<0.0.0.232946>,<0.21220.0>}}
[ns_server:debug,2014-08-19T16:50:03.497,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21209.0> (ok)
[error_logger:info,2014-08-19T16:50:03.497,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21220.0>},
{name,
{new_child_id,
[999,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[999,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:03.504,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[999,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:03.504,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21221.0>
[ns_server:debug,2014-08-19T16:50:03.505,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:03.509,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.509,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3718 us
[ns_server:debug,2014-08-19T16:50:03.510,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.510,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{999,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:03.528,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.529,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.529,ns_1@10.242.238.90:<0.21223.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.529,ns_1@10.242.238.90:<0.21223.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.529,ns_1@10.242.238.90:<0.20149.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:03.530,ns_1@10.242.238.90:<0.21224.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 994
[rebalance:debug,2014-08-19T16:50:03.532,ns_1@10.242.238.90:<0.21224.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.532,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21224.0> (ok)
[ns_server:info,2014-08-19T16:50:03.532,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 1001 state to replica
[ns_server:info,2014-08-19T16:50:03.532,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[999,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023] ([1001], [])
[ns_server:debug,2014-08-19T16:50:03.535,ns_1@10.242.238.90:<0.21227.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[999,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.233255>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[999,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:03.535,ns_1@10.242.238.90:<0.21227.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21220.0>
[ns_server:info,2014-08-19T16:50:03.535,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:03.541,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{999,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:03.542,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:03.542,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:03.542,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:03.542,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:03.542,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21229.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21229.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21220.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:03.543,ns_1@10.242.238.90:<0.21227.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21220.0>
[ns_server:debug,2014-08-19T16:50:03.544,ns_1@10.242.238.90:<0.21227.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:03.544,ns_1@10.242.238.90:<0.21231.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:03.544,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21220.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21221.0>,<<"cut off">>,<<"cut off">>,[],73,false,false,0,
{1408,452603,542603},
completed,
{<0.21227.0>,#Ref<0.0.0.233268>},
<<"replication_ns_1@10.242.238.90">>,<0.21220.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:03.544,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21227.0>,{#Ref<0.0.0.233257>,<0.21231.0>}}
[error_logger:info,2014-08-19T16:50:03.544,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21231.0>},
{name,
{new_child_id,
[999,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[999,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:03.549,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:03.550,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[999,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:03.551,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21232.0>
[ns_server:debug,2014-08-19T16:50:03.554,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.554,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4660 us
[ns_server:debug,2014-08-19T16:50:03.555,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.555,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1001,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:03.689,ns_1@10.242.238.90:<0.21234.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 996
[rebalance:debug,2014-08-19T16:50:03.690,ns_1@10.242.238.90:<0.21234.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:03.690,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21234.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.741,ns_1@10.242.238.90:<0.21238.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 727)
[ns_server:debug,2014-08-19T16:50:03.741,ns_1@10.242.238.90:<0.21238.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.741,ns_1@10.242.238.90:<0.21240.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 729)
[ns_server:debug,2014-08-19T16:50:03.741,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21237.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21240.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21244.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 733)
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21244.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21239.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21245.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 731)
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21245.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21241.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21250.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 735)
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21242.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21250.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.21254.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 741)
[rebalance:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.742,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21243.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21254.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21255.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 728)
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21249.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21255.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21257.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21259.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 737)
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21248.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21258.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21257.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21259.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21258.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20709.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21261.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21262.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 726)
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21261.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.743,ns_1@10.242.238.90:<0.21264.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21247.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21265.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21262.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:info,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20645.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21266.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 739)
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21264.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21265.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:info,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20528.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21246.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21267.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21266.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:info,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20478.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21269.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 730)
[rebalance:info,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20579.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21267.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21251.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21270.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21269.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:info,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20307.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21272.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 732)
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21270.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21272.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21252.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.21273.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 734)
[rebalance:debug,2014-08-19T16:50:03.744,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21253.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21273.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:info,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20684.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21275.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21274.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 738)
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21256.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21276.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21275.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21274.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21276.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21277.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 736)
[rebalance:info,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20414.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21260.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21277.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21278.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 740)
[rebalance:info,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20734.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21263.0> (ok)
[rebalance:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21280.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21279.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 742)
[ns_server:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.21278.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:debug,2014-08-19T16:50:03.745,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21280.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21279.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21268.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21282.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21281.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21271.0> (ok)
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21282.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20364.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21281.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21283.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21284.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20604.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21283.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20554.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21284.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21285.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:info,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20389.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20503.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21285.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:info,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20453.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21286.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.746,ns_1@10.242.238.90:<0.21286.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.747,ns_1@10.242.238.90:<0.20332.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.747,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.747,ns_1@10.242.238.90:<0.21287.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.747,ns_1@10.242.238.90:<0.21287.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.747,ns_1@10.242.238.90:<0.20282.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:03.904,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:03.904,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.904,ns_1@10.242.238.90:<0.21288.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.904,ns_1@10.242.238.90:<0.21288.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.904,ns_1@10.242.238.90:<0.20238.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.918,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:03.921,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.921,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3337 us
[ns_server:debug,2014-08-19T16:50:03.921,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.922,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{487,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:03.928,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:info,2014-08-19T16:50:03.929,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 998 state to replica
[ns_server:debug,2014-08-19T16:50:03.929,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.929,ns_1@10.242.238.90:<0.21290.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.929,ns_1@10.242.238.90:<0.21290.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:info,2014-08-19T16:50:03.929,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[998,999,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([998], [])
[rebalance:info,2014-08-19T16:50:03.929,ns_1@10.242.238.90:<0.20174.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.930,ns_1@10.242.238.90:<0.21291.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[998,999,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.233901>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[998,999,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:03.930,ns_1@10.242.238.90:<0.21291.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21231.0>
[ns_server:info,2014-08-19T16:50:03.930,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:03.941,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{998,1},
{999,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:03.941,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:03.942,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:03.942,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:03.942,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:03.942,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:03.942,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:03.942,ns_1@10.242.238.90:<0.21293.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:03.942,ns_1@10.242.238.90:<0.21293.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21231.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21291.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21231.0>
[ns_server:debug,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21291.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21295.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:03.943,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21231.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21232.0>,<<"cut off">>,<<"cut off">>,[],76,false,false,0,
{1408,452603,942120},
completed,
{<0.21291.0>,#Ref<0.0.0.233914>},
<<"replication_ns_1@10.242.238.90">>,<0.21231.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:03.944,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21291.0>,{#Ref<0.0.0.233903>,<0.21295.0>}}
[error_logger:info,2014-08-19T16:50:03.944,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21295.0>},
{name,
{new_child_id,
[998,999,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[998,999,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:03.950,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[998,999,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[ns_server:debug,2014-08-19T16:50:03.950,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:03.950,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21296.0>
[ns_server:debug,2014-08-19T16:50:03.953,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.953,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2791 us
[ns_server:debug,2014-08-19T16:50:03.954,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.954,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{998,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:03.974,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:03.982,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7325 us
[ns_server:debug,2014-08-19T16:50:03.982,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.983,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:03.983,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{489,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.000,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.003,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.003,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2967 us
[ns_server:debug,2014-08-19T16:50:04.003,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.004,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{471,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:04.017,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.018,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.018,ns_1@10.242.238.90:<0.21299.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.018,ns_1@10.242.238.90:<0.21299.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.018,ns_1@10.242.238.90:<0.20327.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.018,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.021,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2484 us
[ns_server:debug,2014-08-19T16:50:04.021,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.021,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.022,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{475,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.025,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 1000 state to replica
[ns_server:info,2014-08-19T16:50:04.025,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([1000], [])
[ns_server:debug,2014-08-19T16:50:04.026,ns_1@10.242.238.90:<0.21301.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.234123>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.027,ns_1@10.242.238.90:<0.21301.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21295.0>
[ns_server:info,2014-08-19T16:50:04.027,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.034,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[rebalance:debug,2014-08-19T16:50:04.034,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:info,2014-08-19T16:50:04.034,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:debug,2014-08-19T16:50:04.034,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21303.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21303.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:info,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[rebalance:info,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.20549.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21304.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21304.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.035,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.036,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.036,ns_1@10.242.238.90:<0.21295.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.036,ns_1@10.242.238.90:<0.21301.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21295.0>
[ns_server:debug,2014-08-19T16:50:04.038,ns_1@10.242.238.90:<0.21301.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.038,ns_1@10.242.238.90:<0.21306.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.039,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21295.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21296.0>,<<"cut off">>,<<"cut off">>,[],79,false,false,0,
{1408,452604,35170},
completed,
{<0.21301.0>,#Ref<0.0.0.234137>},
<<"replication_ns_1@10.242.238.90">>,<0.21295.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.039,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21301.0>,{#Ref<0.0.0.234125>,<0.21306.0>}}
[error_logger:info,2014-08-19T16:50:04.039,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21306.0>},
{name,
{new_child_id,
[998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.045,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.047,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.047,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21307.0>
[ns_server:debug,2014-08-19T16:50:04.048,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.048,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3024 us
[ns_server:debug,2014-08-19T16:50:04.049,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:50:04.049,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 740 state to active
[ns_server:debug,2014-08-19T16:50:04.049,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{1000,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.065,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:04.067,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 741 state to active
[ns_server:debug,2014-08-19T16:50:04.068,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2828 us
[ns_server:debug,2014-08-19T16:50:04.068,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.069,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{470,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.069,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:50:04.085,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.086,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.086,ns_1@10.242.238.90:<0.21309.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.086,ns_1@10.242.238.90:<0.21309.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.086,ns_1@10.242.238.90:<0.20729.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.088,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.090,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.091,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2848 us
[ns_server:debug,2014-08-19T16:50:04.091,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.092,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{479,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:04.108,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.108,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.108,ns_1@10.242.238.90:<0.21311.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.108,ns_1@10.242.238.90:<0.21311.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.109,ns_1@10.242.238.90:<0.20395.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.111,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.117,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 5471 us
[ns_server:debug,2014-08-19T16:50:04.117,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.117,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.118,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{473,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:04.120,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.120,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.120,ns_1@10.242.238.90:<0.21312.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.121,ns_1@10.242.238.90:<0.21312.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.121,ns_1@10.242.238.90:<0.20754.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[views:debug,2014-08-19T16:50:04.127,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/740. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.127,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",740,active,1}
[rebalance:debug,2014-08-19T16:50:04.133,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.134,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.134,ns_1@10.242.238.90:<0.21314.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.134,ns_1@10.242.238.90:<0.21314.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:04.134,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:info,2014-08-19T16:50:04.134,ns_1@10.242.238.90:<0.20523.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.137,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.137,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3071 us
[ns_server:debug,2014-08-19T16:50:04.137,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.138,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{477,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.140,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 995 state to replica
[ns_server:info,2014-08-19T16:50:04.140,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([995], [])
[ns_server:debug,2014-08-19T16:50:04.141,ns_1@10.242.238.90:<0.21315.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.234494>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.141,ns_1@10.242.238.90:<0.21315.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21306.0>
[ns_server:info,2014-08-19T16:50:04.141,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[rebalance:debug,2014-08-19T16:50:04.150,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.150,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.151,ns_1@10.242.238.90:<0.21317.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.151,ns_1@10.242.238.90:<0.21317.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.151,ns_1@10.242.238.90:<0.20665.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:04.152,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.153,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.153,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.153,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.153,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.153,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21318.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21318.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21306.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.154,ns_1@10.242.238.90:<0.21315.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21306.0>
[ns_server:debug,2014-08-19T16:50:04.155,ns_1@10.242.238.90:<0.21315.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.155,ns_1@10.242.238.90:<0.21320.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.155,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21306.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21307.0>,<<"cut off">>,<<"cut off">>,[],82,false,false,0,
{1408,452604,153660},
completed,
{<0.21315.0>,#Ref<0.0.0.234507>},
<<"replication_ns_1@10.242.238.90">>,<0.21306.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.155,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21315.0>,{#Ref<0.0.0.234496>,<0.21320.0>}}
[error_logger:info,2014-08-19T16:50:04.155,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21320.0>},
{name,
{new_child_id,
[995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.161,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.163,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.163,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21321.0>
[ns_server:debug,2014-08-19T16:50:04.164,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 437 us
[ns_server:debug,2014-08-19T16:50:04.164,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.164,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.165,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{995,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.167,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 987 state to replica
[ns_server:info,2014-08-19T16:50:04.167,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[987,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([987], [])
[rebalance:debug,2014-08-19T16:50:04.172,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.172,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.172,ns_1@10.242.238.90:<0.21325.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.172,ns_1@10.242.238.90:<0.21325.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:04.172,ns_1@10.242.238.90:<0.21323.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[987,995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.234648>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[987,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[rebalance:info,2014-08-19T16:50:04.173,ns_1@10.242.238.90:<0.20704.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.173,ns_1@10.242.238.90:<0.21323.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21320.0>
[ns_server:info,2014-08-19T16:50:04.173,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.182,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{987,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.182,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21326.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21326.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.183,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.184,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.184,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.184,ns_1@10.242.238.90:<0.21320.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.184,ns_1@10.242.238.90:<0.21323.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21320.0>
[ns_server:debug,2014-08-19T16:50:04.184,ns_1@10.242.238.90:<0.21323.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.184,ns_1@10.242.238.90:<0.21328.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.184,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21320.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21321.0>,<<"cut off">>,<<"cut off">>,[],85,false,false,0,
{1408,452604,183011},
completed,
{<0.21323.0>,#Ref<0.0.0.234676>},
<<"replication_ns_1@10.242.238.90">>,<0.21320.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.185,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21323.0>,{#Ref<0.0.0.234651>,<0.21328.0>}}
[error_logger:info,2014-08-19T16:50:04.185,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21328.0>},
{name,
{new_child_id,
[987,995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[987,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[rebalance:debug,2014-08-19T16:50:04.189,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.189,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.189,ns_1@10.242.238.90:<0.21329.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.190,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.190,ns_1@10.242.238.90:<0.21329.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.190,ns_1@10.242.238.90:<0.20434.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.191,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[987,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.191,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21330.0>
[ns_server:debug,2014-08-19T16:50:04.194,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.194,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4005 us
[ns_server:debug,2014-08-19T16:50:04.194,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.195,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{987,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:04.208,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.208,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.208,ns_1@10.242.238.90:<0.21332.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.208,ns_1@10.242.238.90:<0.21332.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.208,ns_1@10.242.238.90:<0.20638.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[views:debug,2014-08-19T16:50:04.210,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/741. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.210,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",741,active,1}
[ns_server:debug,2014-08-19T16:50:04.212,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.214,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 908 us
[ns_server:debug,2014-08-19T16:50:04.214,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.214,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.215,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{740,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:04.221,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.221,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.221,ns_1@10.242.238.90:<0.21333.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.221,ns_1@10.242.238.90:<0.21333.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.222,ns_1@10.242.238.90:<0.20498.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.235,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:04.238,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.238,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.238,ns_1@10.242.238.90:<0.21335.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.239,ns_1@10.242.238.90:<0.21335.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.239,ns_1@10.242.238.90:<0.20370.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.239,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.239,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1896 us
[ns_server:debug,2014-08-19T16:50:04.239,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.240,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{741,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.242,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 981 state to replica
[ns_server:info,2014-08-19T16:50:04.242,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[981,987,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([981], [])
[ns_server:debug,2014-08-19T16:50:04.243,ns_1@10.242.238.90:<0.21336.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[981,987,995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.234929>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[981,987,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.243,ns_1@10.242.238.90:<0.21336.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21328.0>
[ns_server:info,2014-08-19T16:50:04.243,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.249,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{981,1},
{987,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.250,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.250,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.250,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.250,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.250,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.250,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.251,ns_1@10.242.238.90:<0.21338.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.251,ns_1@10.242.238.90:<0.21338.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.251,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.251,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.251,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.251,ns_1@10.242.238.90:<0.21328.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.251,ns_1@10.242.238.90:<0.21336.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21328.0>
[ns_server:debug,2014-08-19T16:50:04.252,ns_1@10.242.238.90:<0.21336.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.252,ns_1@10.242.238.90:<0.21340.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.252,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21328.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21330.0>,<<"cut off">>,<<"cut off">>,[],88,false,false,0,
{1408,452604,250507},
completed,
{<0.21336.0>,#Ref<0.0.0.234942>},
<<"replication_ns_1@10.242.238.90">>,<0.21328.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.252,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21336.0>,{#Ref<0.0.0.234931>,<0.21340.0>}}
[rebalance:debug,2014-08-19T16:50:04.252,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[error_logger:info,2014-08-19T16:50:04.252,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21340.0>},
{name,
{new_child_id,
[981,987,995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[981,987,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.253,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.253,ns_1@10.242.238.90:<0.21341.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.253,ns_1@10.242.238.90:<0.21341.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.253,ns_1@10.242.238.90:<0.20263.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.257,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.259,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[981,987,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.260,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21342.0>
[ns_server:debug,2014-08-19T16:50:04.265,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7294 us
[ns_server:debug,2014-08-19T16:50:04.265,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.265,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.266,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{981,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.267,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 992 state to replica
[ns_server:info,2014-08-19T16:50:04.268,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[981,987,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([992], [])
[ns_server:debug,2014-08-19T16:50:04.269,ns_1@10.242.238.90:<0.21344.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[981,987,992,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.235088>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[981,987,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.269,ns_1@10.242.238.90:<0.21344.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21340.0>
[ns_server:info,2014-08-19T16:50:04.270,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[rebalance:debug,2014-08-19T16:50:04.270,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.270,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.270,ns_1@10.242.238.90:<0.21346.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.270,ns_1@10.242.238.90:<0.21346.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.271,ns_1@10.242.238.90:<0.20288.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:04.276,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{981,1},
{987,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.276,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.277,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.277,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.277,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.277,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.277,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.277,ns_1@10.242.238.90:<0.21347.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.277,ns_1@10.242.238.90:<0.21347.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.278,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.278,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.278,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.278,ns_1@10.242.238.90:<0.21340.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.278,ns_1@10.242.238.90:<0.21344.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21340.0>
[ns_server:debug,2014-08-19T16:50:04.278,ns_1@10.242.238.90:<0.21344.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.278,ns_1@10.242.238.90:<0.21349.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.279,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21340.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21342.0>,<<"cut off">>,<<"cut off">>,[],91,false,false,0,
{1408,452604,277112},
completed,
{<0.21344.0>,#Ref<0.0.0.235101>},
<<"replication_ns_1@10.242.238.90">>,<0.21340.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.279,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21344.0>,{#Ref<0.0.0.235090>,<0.21349.0>}}
[error_logger:info,2014-08-19T16:50:04.279,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21349.0>},
{name,
{new_child_id,
[981,987,992,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[981,987,992,995,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.284,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.285,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[981,987,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.285,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21350.0>
[ns_server:debug,2014-08-19T16:50:04.288,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3016 us
[ns_server:debug,2014-08-19T16:50:04.288,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.288,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.289,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{992,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:04.290,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.290,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.290,ns_1@10.242.238.90:<0.21351.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.290,ns_1@10.242.238.90:<0.21351.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.290,ns_1@10.242.238.90:<0.20338.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:04.290,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 980 state to replica
[ns_server:info,2014-08-19T16:50:04.290,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,987,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([980], [])
[ns_server:debug,2014-08-19T16:50:04.291,ns_1@10.242.238.90:<0.21352.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,987,992,995,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.235252>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,987,992,995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.292,ns_1@10.242.238.90:<0.21352.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21349.0>
[ns_server:info,2014-08-19T16:50:04.292,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.298,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{987,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.298,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21355.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21355.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.299,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.300,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.300,ns_1@10.242.238.90:<0.21349.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.300,ns_1@10.242.238.90:<0.21352.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21349.0>
[ns_server:debug,2014-08-19T16:50:04.300,ns_1@10.242.238.90:<0.21352.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.300,ns_1@10.242.238.90:<0.21357.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.300,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21349.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21350.0>,<<"cut off">>,<<"cut off">>,[],94,false,false,0,
{1408,452604,299107},
completed,
{<0.21352.0>,#Ref<0.0.0.235265>},
<<"replication_ns_1@10.242.238.90">>,<0.21349.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.300,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21352.0>,{#Ref<0.0.0.235254>,<0.21357.0>}}
[error_logger:info,2014-08-19T16:50:04.300,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21357.0>},
{name,
{new_child_id,
[980,981,987,992,995,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,987,992,995,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[rebalance:debug,2014-08-19T16:50:04.304,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.304,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.304,ns_1@10.242.238.90:<0.21358.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.304,ns_1@10.242.238.90:<0.21358.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.304,ns_1@10.242.238.90:<0.20473.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.305,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.307,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,987,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.307,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21359.0>
[ns_server:debug,2014-08-19T16:50:04.308,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2697 us
[ns_server:debug,2014-08-19T16:50:04.308,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.308,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.309,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{980,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.311,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 988 state to replica
[ns_server:info,2014-08-19T16:50:04.311,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,987,988,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([988], [])
[ns_server:debug,2014-08-19T16:50:04.312,ns_1@10.242.238.90:<0.21360.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,987,988,992,995,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.235404>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,987,988,992,995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.313,ns_1@10.242.238.90:<0.21360.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21357.0>
[ns_server:info,2014-08-19T16:50:04.313,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.319,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{987,1},
{988,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.320,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.320,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.320,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.320,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.321,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.321,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.321,ns_1@10.242.238.90:<0.21362.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.321,ns_1@10.242.238.90:<0.21362.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.321,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.321,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.322,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.322,ns_1@10.242.238.90:<0.21357.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.322,ns_1@10.242.238.90:<0.21360.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21357.0>
[ns_server:debug,2014-08-19T16:50:04.322,ns_1@10.242.238.90:<0.21360.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.322,ns_1@10.242.238.90:<0.21364.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.322,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21357.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21359.0>,<<"cut off">>,<<"cut off">>,[],97,false,false,0,
{1408,452604,320586},
completed,
{<0.21360.0>,#Ref<0.0.0.235417>},
<<"replication_ns_1@10.242.238.90">>,<0.21357.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.323,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21360.0>,{#Ref<0.0.0.235406>,<0.21364.0>}}
[error_logger:info,2014-08-19T16:50:04.323,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21364.0>},
{name,
{new_child_id,
[980,981,987,988,992,995,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,987,988,992,995,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.328,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.329,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,987,988,992,995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.329,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21365.0>
[ns_server:debug,2014-08-19T16:50:04.332,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.332,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4296 us
[ns_server:debug,2014-08-19T16:50:04.332,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.333,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{988,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.335,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 983 state to replica
[ns_server:info,2014-08-19T16:50:04.335,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,983,987,988,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023] ([983], [])
[ns_server:debug,2014-08-19T16:50:04.336,ns_1@10.242.238.90:<0.21367.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,983,987,988,992,995,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.235548>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,983,987,988,992,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.336,ns_1@10.242.238.90:<0.21367.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21364.0>
[ns_server:info,2014-08-19T16:50:04.337,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.342,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{983,1},
{987,1},
{988,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.343,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.343,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.343,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21369.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21369.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.344,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.345,ns_1@10.242.238.90:<0.21364.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.345,ns_1@10.242.238.90:<0.21367.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21364.0>
[ns_server:debug,2014-08-19T16:50:04.345,ns_1@10.242.238.90:<0.21367.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.345,ns_1@10.242.238.90:<0.21371.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.345,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21364.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21365.0>,<<"cut off">>,<<"cut off">>,[],100,false,false,0,
{1408,452604,343746},
completed,
{<0.21367.0>,#Ref<0.0.0.235561>},
<<"replication_ns_1@10.242.238.90">>,<0.21364.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.345,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21367.0>,{#Ref<0.0.0.235550>,<0.21371.0>}}
[error_logger:info,2014-08-19T16:50:04.345,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21371.0>},
{name,
{new_child_id,
[980,981,983,987,988,992,995,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,983,987,988,992,995,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:50:04.346,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 736 state to active
[ns_server:debug,2014-08-19T16:50:04.351,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:04.351,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 729 state to active
[ns_server:debug,2014-08-19T16:50:04.351,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,983,987,988,992,995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.352,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21372.0>
[ns_server:debug,2014-08-19T16:50:04.354,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2736 us
[ns_server:debug,2014-08-19T16:50:04.354,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.354,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.355,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{983,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.360,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 982 state to replica
[ns_server:info,2014-08-19T16:50:04.361,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,987,988,992,995,998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023] ([982], [])
[ns_server:debug,2014-08-19T16:50:04.362,ns_1@10.242.238.90:<0.21373.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,987,988,992,995,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.235708>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,987,988,992,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.362,ns_1@10.242.238.90:<0.21373.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21371.0>
[ns_server:info,2014-08-19T16:50:04.362,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.368,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{987,1},
{988,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.369,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.369,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.369,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.369,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.369,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.369,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.369,ns_1@10.242.238.90:<0.21376.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21376.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21371.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21373.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21371.0>
[ns_server:debug,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21373.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.370,ns_1@10.242.238.90:<0.21378.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.371,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21371.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21372.0>,<<"cut off">>,<<"cut off">>,[],103,false,false,0,
{1408,452604,369351},
completed,
{<0.21373.0>,#Ref<0.0.0.235721>},
<<"replication_ns_1@10.242.238.90">>,<0.21371.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.371,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21373.0>,{#Ref<0.0.0.235710>,<0.21378.0>}}
[error_logger:info,2014-08-19T16:50:04.371,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21378.0>},
{name,
{new_child_id,
[980,981,982,983,987,988,992,995,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,987,988,992,995,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:50:04.372,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 730 state to active
[ns_server:debug,2014-08-19T16:50:04.377,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.377,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,987,988,992,995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.377,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21379.0>
[ns_server:debug,2014-08-19T16:50:04.380,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.380,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3523 us
[ns_server:debug,2014-08-19T16:50:04.381,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.382,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{982,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.383,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 991 state to replica
[ns_server:info,2014-08-19T16:50:04.384,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,987,988,991,992,995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023] ([991], [])
[ns_server:debug,2014-08-19T16:50:04.386,ns_1@10.242.238.90:<0.21380.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,987,988,991,992,995,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.235858>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,987,988,991,992,995,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.386,ns_1@10.242.238.90:<0.21380.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21378.0>
[ns_server:info,2014-08-19T16:50:04.386,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.393,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{987,1},
{988,1},
{991,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.393,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21382.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[views:debug,2014-08-19T16:50:04.394,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/736. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21382.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",736,active,1}
[rebalance:info,2014-08-19T16:50:04.394,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.395,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.395,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.395,ns_1@10.242.238.90:<0.21378.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.395,ns_1@10.242.238.90:<0.21380.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21378.0>
[ns_server:debug,2014-08-19T16:50:04.395,ns_1@10.242.238.90:<0.21380.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.395,ns_1@10.242.238.90:<0.21384.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.395,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21378.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21379.0>,<<"cut off">>,<<"cut off">>,[],106,false,false,0,
{1408,452604,394085},
completed,
{<0.21380.0>,#Ref<0.0.0.235872>},
<<"replication_ns_1@10.242.238.90">>,<0.21378.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.396,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21380.0>,{#Ref<0.0.0.235860>,<0.21384.0>}}
[error_logger:info,2014-08-19T16:50:04.396,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21384.0>},
{name,
{new_child_id,
[980,981,982,983,987,988,991,992,995,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,987,988,991,992,995,998,
999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.400,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.402,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,987,988,991,992,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.402,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21385.0>
[ns_server:info,2014-08-19T16:50:04.403,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 742 state to active
[ns_server:debug,2014-08-19T16:50:04.406,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 6036 us
[ns_server:debug,2014-08-19T16:50:04.407,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.407,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.408,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{991,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.409,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 984 state to replica
[ns_server:info,2014-08-19T16:50:04.409,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,987,988,991,992,995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023] ([984], [])
[ns_server:debug,2014-08-19T16:50:04.411,ns_1@10.242.238.90:<0.21387.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,987,988,991,992,995,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.236033>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,987,988,991,992,995,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.411,ns_1@10.242.238.90:<0.21387.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21384.0>
[ns_server:info,2014-08-19T16:50:04.411,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.418,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{987,1},
{988,1},
{991,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21389.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.419,ns_1@10.242.238.90:<0.21389.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21384.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21387.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21384.0>
[ns_server:debug,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21387.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21391.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.420,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21384.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21385.0>,<<"cut off">>,<<"cut off">>,[],109,false,false,0,
{1408,452604,419301},
completed,
{<0.21387.0>,#Ref<0.0.0.236046>},
<<"replication_ns_1@10.242.238.90">>,<0.21384.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.421,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21387.0>,{#Ref<0.0.0.236035>,<0.21391.0>}}
[error_logger:info,2014-08-19T16:50:04.421,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21391.0>},
{name,
{new_child_id,
[980,981,982,983,984,987,988,991,992,995,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,987,988,991,992,995,
998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.425,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.427,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,987,988,991,992,995,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.427,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21392.0>
[ns_server:debug,2014-08-19T16:50:04.428,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.428,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3467 us
[ns_server:debug,2014-08-19T16:50:04.429,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.429,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{984,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.431,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 989 state to replica
[ns_server:info,2014-08-19T16:50:04.431,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,987,988,989,991,992,995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023] ([989], [])
[ns_server:debug,2014-08-19T16:50:04.432,ns_1@10.242.238.90:<0.21393.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,995,
998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.236167>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,987,988,989,991,992,995,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.432,ns_1@10.242.238.90:<0.21393.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21391.0>
[ns_server:info,2014-08-19T16:50:04.432,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.438,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{987,1},
{988,1},
{989,1},
{991,1},
{992,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.439,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.439,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.439,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21395.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21395.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:04.440,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.441,ns_1@10.242.238.90:<0.21391.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.441,ns_1@10.242.238.90:<0.21393.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21391.0>
[ns_server:debug,2014-08-19T16:50:04.441,ns_1@10.242.238.90:<0.21393.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.441,ns_1@10.242.238.90:<0.21397.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.441,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21391.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21392.0>,<<"cut off">>,<<"cut off">>,[],112,false,false,0,
{1408,452604,439770},
completed,
{<0.21393.0>,#Ref<0.0.0.236180>},
<<"replication_ns_1@10.242.238.90">>,<0.21391.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.441,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21393.0>,{#Ref<0.0.0.236169>,<0.21397.0>}}
[error_logger:info,2014-08-19T16:50:04.441,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21397.0>},
{name,
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,995,
998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,987,988,989,991,992,
995,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.447,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,987,988,989,991,992,995,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.448,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21399.0>
[ns_server:debug,2014-08-19T16:50:04.448,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.451,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2756 us
[ns_server:debug,2014-08-19T16:50:04.451,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.452,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.452,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{989,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.455,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 993 state to replica
[ns_server:info,2014-08-19T16:50:04.456,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,987,988,989,991,992,993,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023] ([993], [])
[ns_server:debug,2014-08-19T16:50:04.457,ns_1@10.242.238.90:<0.21400.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.236305>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,995,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.457,ns_1@10.242.238.90:<0.21400.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21397.0>
[ns_server:info,2014-08-19T16:50:04.458,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.465,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{987,1},
{988,1},
{989,1},
{991,1},
{992,1},
{993,1},
{995,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.466,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.466,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.466,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.466,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.466,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21402.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21402.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21397.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.467,ns_1@10.242.238.90:<0.21400.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21397.0>
[ns_server:debug,2014-08-19T16:50:04.468,ns_1@10.242.238.90:<0.21400.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.468,ns_1@10.242.238.90:<0.21404.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.468,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21397.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21399.0>,<<"cut off">>,<<"cut off">>,[],115,false,false,0,
{1408,452604,466589},
completed,
{<0.21400.0>,#Ref<0.0.0.236318>},
<<"replication_ns_1@10.242.238.90">>,<0.21397.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.468,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21400.0>,{#Ref<0.0.0.236307>,<0.21404.0>}}
[error_logger:info,2014-08-19T16:50:04.468,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21404.0>},
{name,
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
995,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,987,988,989,991,992,
993,995,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.472,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.476,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,995,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.476,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21406.0>
[ns_server:debug,2014-08-19T16:50:04.477,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.477,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4423 us
[ns_server:debug,2014-08-19T16:50:04.477,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.478,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{993,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:04.479,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/729. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.479,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",729,active,1}
[ns_server:info,2014-08-19T16:50:04.480,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 997 state to replica
[ns_server:info,2014-08-19T16:50:04.480,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,987,988,989,991,992,993,995,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023] ([997], [])
[ns_server:debug,2014-08-19T16:50:04.481,ns_1@10.242.238.90:<0.21407.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
995,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.236470>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,995,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.482,ns_1@10.242.238.90:<0.21407.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21404.0>
[ns_server:info,2014-08-19T16:50:04.482,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[rebalance:debug,2014-08-19T16:50:04.483,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.483,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.483,ns_1@10.242.238.90:<0.21409.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.483,ns_1@10.242.238.90:<0.21409.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.483,ns_1@10.242.238.90:<0.20574.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:04.488,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{987,1},
{988,1},
{989,1},
{991,1},
{992,1},
{993,1},
{995,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.489,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.489,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.489,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21410.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21410.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21404.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.490,ns_1@10.242.238.90:<0.21407.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21404.0>
[ns_server:debug,2014-08-19T16:50:04.491,ns_1@10.242.238.90:<0.21407.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.491,ns_1@10.242.238.90:<0.21412.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.491,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21404.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21406.0>,<<"cut off">>,<<"cut off">>,[],118,false,false,0,
{1408,452604,489747},
completed,
{<0.21407.0>,#Ref<0.0.0.236483>},
<<"replication_ns_1@10.242.238.90">>,<0.21404.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.491,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21407.0>,{#Ref<0.0.0.236472>,<0.21412.0>}}
[error_logger:info,2014-08-19T16:50:04.491,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21412.0>},
{name,
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
995,997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,987,988,989,991,992,
993,995,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:50:04.492,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 731 state to active
[ns_server:debug,2014-08-19T16:50:04.496,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.497,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,995,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.498,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21413.0>
[ns_server:debug,2014-08-19T16:50:04.499,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2834 us
[ns_server:debug,2014-08-19T16:50:04.499,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.500,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.500,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{997,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.507,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 996 state to replica
[ns_server:info,2014-08-19T16:50:04.507,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,987,988,989,991,992,993,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023] ([996], [])
[ns_server:debug,2014-08-19T16:50:04.508,ns_1@10.242.238.90:<0.21414.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.236634>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.509,ns_1@10.242.238.90:<0.21414.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21412.0>
[ns_server:info,2014-08-19T16:50:04.509,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.515,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{987,1},
{988,1},
{989,1},
{991,1},
{992,1},
{993,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.516,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.516,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.516,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.516,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.516,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21417.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21417.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21412.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21414.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21412.0>
[ns_server:debug,2014-08-19T16:50:04.517,ns_1@10.242.238.90:<0.21414.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.518,ns_1@10.242.238.90:<0.21419.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.518,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21412.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21413.0>,<<"cut off">>,<<"cut off">>,[],121,false,false,0,
{1408,452604,516573},
completed,
{<0.21414.0>,#Ref<0.0.0.236648>},
<<"replication_ns_1@10.242.238.90">>,<0.21412.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.518,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21414.0>,{#Ref<0.0.0.236636>,<0.21419.0>}}
[ns_server:info,2014-08-19T16:50:04.518,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 737 state to active
[error_logger:info,2014-08-19T16:50:04.518,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21419.0>},
{name,
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,987,988,989,991,992,
993,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:04.524,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.524,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21420.0>
[ns_server:debug,2014-08-19T16:50:04.526,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:04.527,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:04.527,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.527,ns_1@10.242.238.90:<0.21421.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.527,ns_1@10.242.238.90:<0.21421.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.528,ns_1@10.242.238.90:<0.20599.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.530,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.530,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4147 us
[ns_server:debug,2014-08-19T16:50:04.531,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.532,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{996,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.533,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 994 state to replica
[ns_server:info,2014-08-19T16:50:04.533,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,987,988,989,991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023] ([994], [])
[ns_server:debug,2014-08-19T16:50:04.534,ns_1@10.242.238.90:<0.21422.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
994,995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.236802>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,994,995,996,997,
998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.534,ns_1@10.242.238.90:<0.21422.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21419.0>
[ns_server:info,2014-08-19T16:50:04.534,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.542,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{987,1},
{988,1},
{989,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.543,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.543,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.543,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.543,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.543,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.543,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.544,ns_1@10.242.238.90:<0.21425.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.544,ns_1@10.242.238.90:<0.21425.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.544,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.544,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:04.544,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.544,ns_1@10.242.238.90:<0.21419.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.544,ns_1@10.242.238.90:<0.21422.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21419.0>
[ns_server:debug,2014-08-19T16:50:04.545,ns_1@10.242.238.90:<0.21422.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.545,ns_1@10.242.238.90:<0.21427.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.545,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21419.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21420.0>,<<"cut off">>,<<"cut off">>,[],124,false,false,0,
{1408,452604,543396},
completed,
{<0.21422.0>,#Ref<0.0.0.236815>},
<<"replication_ns_1@10.242.238.90">>,<0.21419.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.545,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21422.0>,{#Ref<0.0.0.236804>,<0.21427.0>}}
[ns_server:info,2014-08-19T16:50:04.545,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 738 state to active
[error_logger:info,2014-08-19T16:50:04.545,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21427.0>},
{name,
{new_child_id,
[980,981,982,983,984,987,988,989,991,992,993,
994,995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,987,988,989,991,992,
993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[views:debug,2014-08-19T16:50:04.546,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/742. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.546,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",742,active,1}
[ns_server:debug,2014-08-19T16:50:04.551,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.551,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,987,988,989,991,992,993,994,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.552,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21428.0>
[ns_server:info,2014-08-19T16:50:04.557,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 733 state to active
[ns_server:debug,2014-08-19T16:50:04.558,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 6853 us
[ns_server:debug,2014-08-19T16:50:04.558,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.559,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.560,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{994,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.561,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 990 state to replica
[ns_server:info,2014-08-19T16:50:04.561,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023] ([990], [])
[ns_server:debug,2014-08-19T16:50:04.565,ns_1@10.242.238.90:<0.21429.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.236985>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,987,988,989,990,991,992,993,994,995,996,
997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.565,ns_1@10.242.238.90:<0.21429.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21427.0>
[ns_server:info,2014-08-19T16:50:04.565,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.571,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.572,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.572,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.573,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.573,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.573,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.573,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.573,ns_1@10.242.238.90:<0.21431.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.573,ns_1@10.242.238.90:<0.21431.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.573,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.574,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:04.574,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.574,ns_1@10.242.238.90:<0.21427.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.574,ns_1@10.242.238.90:<0.21429.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21427.0>
[ns_server:debug,2014-08-19T16:50:04.574,ns_1@10.242.238.90:<0.21429.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.574,ns_1@10.242.238.90:<0.21433.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.574,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21427.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21428.0>,<<"cut off">>,<<"cut off">>,[],127,false,false,0,
{1408,452604,572830},
completed,
{<0.21429.0>,#Ref<0.0.0.236998>},
<<"replication_ns_1@10.242.238.90">>,<0.21427.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.575,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21429.0>,{#Ref<0.0.0.236987>,<0.21433.0>}}
[error_logger:info,2014-08-19T16:50:04.575,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21433.0>},
{name,
{new_child_id,
[980,981,982,983,984,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:50:04.577,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 728 state to active
[ns_server:debug,2014-08-19T16:50:04.580,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.581,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,987,988,989,990,991,992,993,994,995,996,997,
998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.581,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21435.0>
[ns_server:debug,2014-08-19T16:50:04.583,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.583,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2580 us
[ns_server:debug,2014-08-19T16:50:04.584,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.585,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{990,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.607,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:04.609,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 727 state to active
[ns_server:debug,2014-08-19T16:50:04.609,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2704 us
[ns_server:debug,2014-08-19T16:50:04.610,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.611,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.611,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{486,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:04.613,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/730. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.613,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",730,active,1}
[ns_server:info,2014-08-19T16:50:04.623,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 735 state to active
[ns_server:debug,2014-08-19T16:50:04.630,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.632,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.632,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2014 us
[ns_server:debug,2014-08-19T16:50:04.632,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.633,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{736,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.651,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.653,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1140 us
[ns_server:debug,2014-08-19T16:50:04.653,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.653,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.654,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{729,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.656,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 739 state to active
[ns_server:debug,2014-08-19T16:50:04.687,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:04.688,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 734 state to active
[views:debug,2014-08-19T16:50:04.688,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/738. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.688,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",738,active,1}
[ns_server:debug,2014-08-19T16:50:04.689,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2384 us
[ns_server:debug,2014-08-19T16:50:04.689,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.690,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.690,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{730,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.704,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:04.706,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 732 state to active
[ns_server:debug,2014-08-19T16:50:04.708,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3426 us
[ns_server:debug,2014-08-19T16:50:04.708,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.708,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.709,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{481,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.724,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.728,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3207 us
[ns_server:debug,2014-08-19T16:50:04.728,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.728,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.729,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{742,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.739,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 726 state to active
[ns_server:debug,2014-08-19T16:50:04.744,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.747,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.748,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.748,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4024 us
[ns_server:debug,2014-08-19T16:50:04.749,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{478,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:04.755,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/728. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.755,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",728,active,1}
[ns_server:debug,2014-08-19T16:50:04.765,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.769,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.769,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3344 us
[ns_server:debug,2014-08-19T16:50:04.769,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.770,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{480,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.785,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.788,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2642 us
[ns_server:debug,2014-08-19T16:50:04.788,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.788,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.789,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{476,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.795,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 986 state to replica
[ns_server:info,2014-08-19T16:50:04.795,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,986,987,988,989,990,991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023] ([986], [])
[ns_server:debug,2014-08-19T16:50:04.796,ns_1@10.242.238.90:<0.21443.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.237513>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,986,987,988,989,990,991,992,993,994,995,
996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.797,ns_1@10.242.238.90:<0.21443.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21433.0>
[ns_server:info,2014-08-19T16:50:04.797,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.810,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.812,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.812,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.812,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.812,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.812,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.812,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.812,ns_1@10.242.238.90:<0.21445.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.813,ns_1@10.242.238.90:<0.21445.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.813,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.813,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.813,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.813,ns_1@10.242.238.90:<0.21433.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.813,ns_1@10.242.238.90:<0.21443.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21433.0>
[ns_server:debug,2014-08-19T16:50:04.814,ns_1@10.242.238.90:<0.21443.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.814,ns_1@10.242.238.90:<0.21447.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.814,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21433.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21435.0>,<<"cut off">>,<<"cut off">>,[],130,false,false,0,
{1408,452604,812330},
completed,
{<0.21443.0>,#Ref<0.0.0.237527>},
<<"replication_ns_1@10.242.238.90">>,<0.21433.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.814,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21443.0>,{#Ref<0.0.0.237516>,<0.21447.0>}}
[error_logger:info,2014-08-19T16:50:04.814,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21447.0>},
{name,
{new_child_id,
[980,981,982,983,984,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[views:debug,2014-08-19T16:50:04.818,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/739. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.818,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",739,active,1}
[ns_server:debug,2014-08-19T16:50:04.818,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.820,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,986,987,988,989,990,991,992,993,994,995,996,
997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.821,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21448.0>
[ns_server:debug,2014-08-19T16:50:04.822,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.822,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3337 us
[ns_server:debug,2014-08-19T16:50:04.822,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.823,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{986,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.840,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.846,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.847,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{731,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:04.860,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/737. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.863,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 25 us
[ns_server:debug,2014-08-19T16:50:04.863,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",737,active,1}
[ns_server:debug,2014-08-19T16:50:04.863,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.884,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.885,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 310 us
[ns_server:debug,2014-08-19T16:50:04.885,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.885,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.886,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{737,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:04.888,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 985 state to replica
[ns_server:info,2014-08-19T16:50:04.888,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([985], [])
[ns_server:debug,2014-08-19T16:50:04.889,ns_1@10.242.238.90:<0.21451.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[980,981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.237752>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,
995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:04.889,ns_1@10.242.238.90:<0.21451.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21447.0>
[ns_server:info,2014-08-19T16:50:04.889,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:04.896,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:04.897,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:04.897,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:04.897,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:04.897,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:04.897,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:04.897,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:04.897,ns_1@10.242.238.90:<0.21454.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:04.898,ns_1@10.242.238.90:<0.21454.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:04.898,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:04.898,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to the kernel
[ns_server:debug,2014-08-19T16:50:04.898,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:04.898,ns_1@10.242.238.90:<0.21447.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:04.898,ns_1@10.242.238.90:<0.21451.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21447.0>
[ns_server:debug,2014-08-19T16:50:04.898,ns_1@10.242.238.90:<0.21451.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:04.899,ns_1@10.242.238.90:<0.21456.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:04.899,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21447.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21448.0>,<<"cut off">>,<<"cut off">>,[],133,false,false,0,
{1408,452604,897329},
completed,
{<0.21451.0>,#Ref<0.0.0.237765>},
<<"replication_ns_1@10.242.238.90">>,<0.21447.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:04.899,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.21451.0>,{#Ref<0.0.0.237754>,<0.21456.0>}}
[error_logger:info,2014-08-19T16:50:04.899,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.21456.0>},
{name,
{new_child_id,
[980,981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[views:debug,2014-08-19T16:50:04.902,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/735. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.902,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",735,active,1}
[ns_server:debug,2014-08-19T16:50:04.903,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.905,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,
996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:04.906,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21457.0>
[ns_server:debug,2014-08-19T16:50:04.906,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2361 us
[ns_server:debug,2014-08-19T16:50:04.906,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.907,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.907,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{985,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.923,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.926,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.926,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3203 us
[ns_server:debug,2014-08-19T16:50:04.927,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.927,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{738,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:04.935,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/733. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.935,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",733,active,1}
[ns_server:debug,2014-08-19T16:50:04.944,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.947,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2914 us
[ns_server:debug,2014-08-19T16:50:04.947,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.948,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.948,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{733,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.966,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.970,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.970,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3226 us
[ns_server:debug,2014-08-19T16:50:04.970,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[views:debug,2014-08-19T16:50:04.971,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/731. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:04.971,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",731,active,1}
[ns_server:debug,2014-08-19T16:50:04.971,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{728,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:04.984,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:04.991,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.991,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7472 us
[ns_server:debug,2014-08-19T16:50:04.992,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:04.993,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{484,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:05.003,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/727. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:05.003,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",727,active,1}
[ns_server:debug,2014-08-19T16:50:05.009,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.012,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.012,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3325 us
[ns_server:debug,2014-08-19T16:50:05.013,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.013,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{727,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:05.036,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/734. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:05.036,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",734,active,1}
[ns_server:debug,2014-08-19T16:50:05.038,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.040,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1206 us
[ns_server:debug,2014-08-19T16:50:05.040,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.040,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.041,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{735,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.056,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.059,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.060,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3174 us
[ns_server:debug,2014-08-19T16:50:05.060,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.061,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{472,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:05.071,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/732. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:05.071,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",732,active,1}
[ns_server:debug,2014-08-19T16:50:05.079,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.081,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2558 us
[ns_server:debug,2014-08-19T16:50:05.081,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.082,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.082,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{739,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.100,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.103,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.103,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3229 us
[ns_server:debug,2014-08-19T16:50:05.103,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[views:debug,2014-08-19T16:50:05.104,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/726. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:05.104,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",726,active,1}
[ns_server:debug,2014-08-19T16:50:05.104,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{482,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.131,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.138,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7125 us
[ns_server:debug,2014-08-19T16:50:05.138,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.139,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.140,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{734,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.158,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.160,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1640 us
[ns_server:debug,2014-08-19T16:50:05.160,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.160,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.161,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{732,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.174,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.177,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2406 us
[ns_server:debug,2014-08-19T16:50:05.177,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.177,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.178,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{483,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.199,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.202,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3371 us
[ns_server:debug,2014-08-19T16:50:05.203,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.203,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.204,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{726,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.218,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.220,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1852 us
[ns_server:debug,2014-08-19T16:50:05.220,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.220,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.221,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{485,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:05.239,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:05.242,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.242,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3210 us
[ns_server:debug,2014-08-19T16:50:05.243,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:05.243,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{474,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:05.261,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 979 state to replica
[ns_server:info,2014-08-19T16:50:05.265,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 979 to state replica
[ns_server:debug,2014-08-19T16:50:05.302,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_979_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.303,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[979]},
{checkpoints,[{979,0}]},
{name,<<"replication_building_979_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[979]},
{takeover,false},
{suffix,"building_979_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",979,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:05.304,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21481.0>
[rebalance:debug,2014-08-19T16:50:05.304,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.305,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1362.1>,#Ref<16550.0.1.86904>}]}
[rebalance:info,2014-08-19T16:50:05.305,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 979
[rebalance:debug,2014-08-19T16:50:05.305,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1362.1>,#Ref<16550.0.1.86904>}]
[ns_server:debug,2014-08-19T16:50:05.306,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:05.317,ns_1@10.242.238.90:<0.21482.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 979
[ns_server:info,2014-08-19T16:50:05.323,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 725 state to replica
[ns_server:info,2014-08-19T16:50:05.328,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 725 to state replica
[ns_server:debug,2014-08-19T16:50:05.346,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 979. Nacking mccouch update.
[views:debug,2014-08-19T16:50:05.346,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/979. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:05.347,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",979,replica,0}
[ns_server:debug,2014-08-19T16:50:05.347,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,987,756,740,1019,1003,990,759,743,727,1022,1006,993,762,
746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,1018,
1002,989,758,742,726,1021,1005,992,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,1020,1004,991,760,
744,728,1023,1007,994,763,747,731,1010,981,766,734,1013,753,1000]
[ns_server:debug,2014-08-19T16:50:05.373,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_725_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.375,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[725]},
{checkpoints,[{725,0}]},
{name,<<"replication_building_725_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[725]},
{takeover,false},
{suffix,"building_725_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",725,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:05.375,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21492.0>
[rebalance:debug,2014-08-19T16:50:05.376,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.376,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1382.1>,#Ref<16550.0.1.87022>}]}
[rebalance:info,2014-08-19T16:50:05.376,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 725
[rebalance:debug,2014-08-19T16:50:05.377,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1382.1>,#Ref<16550.0.1.87022>}]
[ns_server:debug,2014-08-19T16:50:05.377,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21493.0> (ok)
[ns_server:debug,2014-08-19T16:50:05.377,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:05.380,ns_1@10.242.238.90:<0.21494.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 725
[views:debug,2014-08-19T16:50:05.397,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/979. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:05.397,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",979,replica,0}
[ns_server:info,2014-08-19T16:50:05.454,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 978 state to replica
[ns_server:info,2014-08-19T16:50:05.458,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 978 to state replica
[ns_server:debug,2014-08-19T16:50:05.472,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 725. Nacking mccouch update.
[views:debug,2014-08-19T16:50:05.472,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/725. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:05.472,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",725,pending,0}
[ns_server:debug,2014-08-19T16:50:05.473,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,987,756,740,1019,1003,990,759,743,727,1022,1006,993,762,
746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,1018,
1002,989,758,742,726,1021,1005,992,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,725,1020,1004,991,
760,744,728,1023,1007,994,763,747,731,1010,981,766,734,1013,753,1000]
[ns_server:debug,2014-08-19T16:50:05.490,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_978_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.492,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[978]},
{checkpoints,[{978,0}]},
{name,<<"replication_building_978_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[978]},
{takeover,false},
{suffix,"building_978_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",978,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:05.492,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21512.0>
[rebalance:debug,2014-08-19T16:50:05.493,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.493,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1440.1>,#Ref<16550.0.1.87319>}]}
[rebalance:info,2014-08-19T16:50:05.493,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 978
[rebalance:debug,2014-08-19T16:50:05.494,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1440.1>,#Ref<16550.0.1.87319>}]
[ns_server:debug,2014-08-19T16:50:05.495,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:05.512,ns_1@10.242.238.90:<0.21513.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 978
[views:debug,2014-08-19T16:50:05.513,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/725. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:05.514,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",725,pending,0}
[rebalance:debug,2014-08-19T16:50:05.515,ns_1@10.242.238.90:<0.21494.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:05.515,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21494.0> (ok)
[ns_server:info,2014-08-19T16:50:05.518,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 724 state to replica
[ns_server:info,2014-08-19T16:50:05.524,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 724 to state replica
[ns_server:debug,2014-08-19T16:50:05.568,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_724_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.570,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[724]},
{checkpoints,[{724,0}]},
{name,<<"replication_building_724_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[724]},
{takeover,false},
{suffix,"building_724_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",724,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:05.571,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21517.0>
[rebalance:debug,2014-08-19T16:50:05.571,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.571,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1460.1>,#Ref<16550.0.1.87436>}]}
[rebalance:info,2014-08-19T16:50:05.572,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 724
[rebalance:debug,2014-08-19T16:50:05.572,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1460.1>,#Ref<16550.0.1.87436>}]
[ns_server:debug,2014-08-19T16:50:05.573,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:50:05.573,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21518.0> (ok)
[rebalance:debug,2014-08-19T16:50:05.574,ns_1@10.242.238.90:<0.21519.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 724
[ns_server:info,2014-08-19T16:50:05.642,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 977 state to replica
[ns_server:info,2014-08-19T16:50:05.646,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 977 to state replica
[ns_server:debug,2014-08-19T16:50:05.647,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 978. Nacking mccouch update.
[views:debug,2014-08-19T16:50:05.647,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/978. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:05.647,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",978,replica,0}
[ns_server:debug,2014-08-19T16:50:05.647,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,987,756,740,1019,1003,990,759,743,727,1022,1006,993,762,
746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,1018,
1002,989,758,742,726,1021,1005,992,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,725,1020,1004,991,
760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,1000]
[ns_server:debug,2014-08-19T16:50:05.677,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_977_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.679,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[977]},
{checkpoints,[{977,0}]},
{name,<<"replication_building_977_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[977]},
{takeover,false},
{suffix,"building_977_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",977,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:05.679,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21537.0>
[rebalance:debug,2014-08-19T16:50:05.679,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.680,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1517.1>,#Ref<16550.0.1.87684>}]}
[rebalance:info,2014-08-19T16:50:05.680,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 977
[rebalance:debug,2014-08-19T16:50:05.680,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1517.1>,#Ref<16550.0.1.87684>}]
[ns_server:debug,2014-08-19T16:50:05.681,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:05.700,ns_1@10.242.238.90:<0.21538.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 977
[ns_server:info,2014-08-19T16:50:05.706,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 723 state to replica
[ns_server:info,2014-08-19T16:50:05.712,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 723 to state replica
[views:debug,2014-08-19T16:50:05.714,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/978. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:05.715,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",978,replica,0}
[ns_server:debug,2014-08-19T16:50:05.759,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_723_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.760,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[723]},
{checkpoints,[{723,0}]},
{name,<<"replication_building_723_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[723]},
{takeover,false},
{suffix,"building_723_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",723,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:05.760,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21556.0>
[rebalance:debug,2014-08-19T16:50:05.761,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.761,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1537.1>,#Ref<16550.0.1.87798>}]}
[rebalance:info,2014-08-19T16:50:05.761,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 723
[rebalance:debug,2014-08-19T16:50:05.761,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1537.1>,#Ref<16550.0.1.87798>}]
[ns_server:debug,2014-08-19T16:50:05.762,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21557.0> (ok)
[ns_server:debug,2014-08-19T16:50:05.762,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:05.763,ns_1@10.242.238.90:<0.21558.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 723
[ns_server:debug,2014-08-19T16:50:05.798,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 724. Nacking mccouch update.
[views:debug,2014-08-19T16:50:05.798,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/724. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:05.798,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",724,pending,0}
[ns_server:debug,2014-08-19T16:50:05.798,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,987,756,740,724,1019,1003,990,759,743,727,1022,1006,993,
762,746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,
1018,1002,989,758,742,726,1021,1005,992,761,745,729,1008,995,979,764,748,732,
1011,998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,725,1020,
1004,991,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,
1000]
[ns_server:info,2014-08-19T16:50:05.833,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 976 state to replica
[ns_server:info,2014-08-19T16:50:05.845,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 976 to state replica
[views:debug,2014-08-19T16:50:05.865,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/724. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:05.865,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",724,pending,0}
[ns_server:debug,2014-08-19T16:50:05.877,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_976_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.878,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[976]},
{checkpoints,[{976,0}]},
{name,<<"replication_building_976_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[976]},
{takeover,false},
{suffix,"building_976_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",976,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:05.879,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21562.0>
[rebalance:debug,2014-08-19T16:50:05.879,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.879,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1594.1>,#Ref<16550.0.1.88063>}]}
[rebalance:info,2014-08-19T16:50:05.880,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 976
[rebalance:debug,2014-08-19T16:50:05.880,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1594.1>,#Ref<16550.0.1.88063>}]
[ns_server:debug,2014-08-19T16:50:05.881,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:05.901,ns_1@10.242.238.90:<0.21563.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 976
[ns_server:info,2014-08-19T16:50:05.908,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 722 state to replica
[ns_server:info,2014-08-19T16:50:05.915,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 722 to state replica
[ns_server:debug,2014-08-19T16:50:05.962,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_722_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:05.963,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[722]},
{checkpoints,[{722,0}]},
{name,<<"replication_building_722_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[722]},
{takeover,false},
{suffix,"building_722_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",722,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:05.964,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21581.0>
[rebalance:debug,2014-08-19T16:50:05.964,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:05.964,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1614.1>,#Ref<16550.0.1.88179>}]}
[rebalance:info,2014-08-19T16:50:05.965,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 722
[rebalance:debug,2014-08-19T16:50:05.965,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1614.1>,#Ref<16550.0.1.88179>}]
[ns_server:debug,2014-08-19T16:50:05.966,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21582.0> (ok)
[ns_server:debug,2014-08-19T16:50:05.966,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:05.967,ns_1@10.242.238.90:<0.21583.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 722
[ns_server:debug,2014-08-19T16:50:06.015,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 977. Nacking mccouch update.
[views:debug,2014-08-19T16:50:06.016,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/977. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.016,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",977,replica,0}
[ns_server:debug,2014-08-19T16:50:06.016,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,756,724,1003,990,759,743,727,1022,1006,993,977,762,746,
730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,1018,1002,
989,758,742,726,1021,1005,992,761,745,729,1008,995,979,764,748,732,1011,998,
982,767,751,735,1014,985,754,738,1017,1001,988,757,741,725,1020,1004,991,760,
744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,1000,987,740,
1019]
[ns_server:info,2014-08-19T16:50:06.034,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 975 state to replica
[ns_server:info,2014-08-19T16:50:06.038,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 975 to state replica
[views:debug,2014-08-19T16:50:06.066,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/977. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.066,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",977,replica,0}
[ns_server:debug,2014-08-19T16:50:06.070,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_975_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.071,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[975]},
{checkpoints,[{975,0}]},
{name,<<"replication_building_975_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[975]},
{takeover,false},
{suffix,"building_975_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",975,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:06.072,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21587.0>
[rebalance:debug,2014-08-19T16:50:06.072,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.073,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1671.1>,#Ref<16550.0.1.88441>}]}
[rebalance:info,2014-08-19T16:50:06.073,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 975
[rebalance:debug,2014-08-19T16:50:06.073,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1671.1>,#Ref<16550.0.1.88441>}]
[ns_server:debug,2014-08-19T16:50:06.074,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.091,ns_1@10.242.238.90:<0.21588.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 975
[ns_server:info,2014-08-19T16:50:06.097,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 721 state to replica
[ns_server:info,2014-08-19T16:50:06.103,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 721 to state replica
[ns_server:debug,2014-08-19T16:50:06.149,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_721_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.151,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[721]},
{checkpoints,[{721,0}]},
{name,<<"replication_building_721_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[721]},
{takeover,false},
{suffix,"building_721_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",721,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:06.151,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21606.0>
[rebalance:debug,2014-08-19T16:50:06.152,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.152,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1691.1>,#Ref<16550.0.1.88551>}]}
[rebalance:info,2014-08-19T16:50:06.152,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 721
[rebalance:debug,2014-08-19T16:50:06.153,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1691.1>,#Ref<16550.0.1.88551>}]
[ns_server:debug,2014-08-19T16:50:06.154,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21607.0> (ok)
[ns_server:debug,2014-08-19T16:50:06.154,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.155,ns_1@10.242.238.90:<0.21608.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 721
[ns_server:debug,2014-08-19T16:50:06.208,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 723. Nacking mccouch update.
[views:debug,2014-08-19T16:50:06.208,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/723. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:06.208,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",723,pending,0}
[ns_server:debug,2014-08-19T16:50:06.208,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,756,724,1003,990,759,743,727,1022,1006,993,977,762,746,
730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,723,1018,
1002,989,758,742,726,1021,1005,992,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,725,1020,1004,991,
760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,1000,987,
740,1019]
[ns_server:info,2014-08-19T16:50:06.229,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 974 state to replica
[ns_server:info,2014-08-19T16:50:06.233,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 974 to state replica
[views:debug,2014-08-19T16:50:06.258,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/723. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:06.258,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",723,pending,0}
[rebalance:debug,2014-08-19T16:50:06.259,ns_1@10.242.238.90:<0.21482.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:06.259,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21482.0> (ok)
[ns_server:debug,2014-08-19T16:50:06.266,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_974_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.267,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[974]},
{checkpoints,[{974,0}]},
{name,<<"replication_building_974_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[974]},
{takeover,false},
{suffix,"building_974_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",974,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:06.268,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21612.0>
[rebalance:debug,2014-08-19T16:50:06.268,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.269,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1748.1>,#Ref<16550.0.1.88834>}]}
[rebalance:info,2014-08-19T16:50:06.269,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 974
[rebalance:debug,2014-08-19T16:50:06.269,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1748.1>,#Ref<16550.0.1.88834>}]
[ns_server:debug,2014-08-19T16:50:06.270,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.287,ns_1@10.242.238.90:<0.21613.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 974
[ns_server:info,2014-08-19T16:50:06.294,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 720 state to replica
[ns_server:info,2014-08-19T16:50:06.299,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 720 to state replica
[ns_server:debug,2014-08-19T16:50:06.344,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_720_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.345,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[720]},
{checkpoints,[{720,0}]},
{name,<<"replication_building_720_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[720]},
{takeover,false},
{suffix,"building_720_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",720,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:06.346,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21631.0>
[rebalance:debug,2014-08-19T16:50:06.346,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.347,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1776.1>,#Ref<16550.0.1.89028>}]}
[rebalance:info,2014-08-19T16:50:06.347,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 720
[rebalance:debug,2014-08-19T16:50:06.347,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1776.1>,#Ref<16550.0.1.89028>}]
[ns_server:debug,2014-08-19T16:50:06.348,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21632.0> (ok)
[ns_server:debug,2014-08-19T16:50:06.348,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.349,ns_1@10.242.238.90:<0.21633.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 720
[ns_server:debug,2014-08-19T16:50:06.398,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 976. Nacking mccouch update.
[views:debug,2014-08-19T16:50:06.398,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/976. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.398,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",976,replica,0}
[ns_server:debug,2014-08-19T16:50:06.398,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,756,724,1003,990,759,743,727,1022,1006,993,977,762,746,
730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,723,1018,
1002,989,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,
1011,998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,725,1020,
1004,991,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,
1000,987,740,1019]
[ns_server:info,2014-08-19T16:50:06.421,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 973 state to replica
[ns_server:info,2014-08-19T16:50:06.425,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 973 to state replica
[views:debug,2014-08-19T16:50:06.432,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/976. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.432,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",976,replica,0}
[ns_server:debug,2014-08-19T16:50:06.458,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_973_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.459,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[973]},
{checkpoints,[{973,0}]},
{name,<<"replication_building_973_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[973]},
{takeover,false},
{suffix,"building_973_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",973,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:06.460,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21638.0>
[rebalance:debug,2014-08-19T16:50:06.460,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.461,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1835.1>,#Ref<16550.0.1.89327>}]}
[rebalance:info,2014-08-19T16:50:06.461,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 973
[rebalance:debug,2014-08-19T16:50:06.461,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1835.1>,#Ref<16550.0.1.89327>}]
[ns_server:debug,2014-08-19T16:50:06.462,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.480,ns_1@10.242.238.90:<0.21653.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 973
[ns_server:info,2014-08-19T16:50:06.485,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 719 state to replica
[ns_server:info,2014-08-19T16:50:06.490,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 719 to state replica
[ns_server:debug,2014-08-19T16:50:06.507,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 974. Nacking mccouch update.
[views:debug,2014-08-19T16:50:06.507,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/974. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.507,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",974,replica,0}
[ns_server:debug,2014-08-19T16:50:06.508,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,756,724,1003,990,974,759,743,727,1022,1006,993,977,762,
746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,723,
1018,1002,989,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,
732,1011,998,982,767,751,735,1014,985,754,738,1017,1001,988,757,741,725,1020,
1004,991,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,
1000,987,740,1019]
[ns_server:debug,2014-08-19T16:50:06.536,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_719_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.537,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[719]},
{checkpoints,[{719,0}]},
{name,<<"replication_building_719_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[719]},
{takeover,false},
{suffix,"building_719_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",719,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:06.538,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21657.0>
[rebalance:debug,2014-08-19T16:50:06.538,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.539,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1869.1>,#Ref<16550.0.1.89498>}]}
[rebalance:info,2014-08-19T16:50:06.539,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 719
[rebalance:debug,2014-08-19T16:50:06.539,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1869.1>,#Ref<16550.0.1.89498>}]
[ns_server:debug,2014-08-19T16:50:06.540,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21658.0> (ok)
[ns_server:debug,2014-08-19T16:50:06.540,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.541,ns_1@10.242.238.90:<0.21659.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 719
[views:debug,2014-08-19T16:50:06.558,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/974. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.558,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",974,replica,0}
[ns_server:info,2014-08-19T16:50:06.609,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 972 state to replica
[ns_server:info,2014-08-19T16:50:06.613,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 972 to state replica
[ns_server:debug,2014-08-19T16:50:06.645,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_972_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.646,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[972]},
{checkpoints,[{972,0}]},
{name,<<"replication_building_972_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[972]},
{takeover,false},
{suffix,"building_972_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",972,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:06.647,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21677.0>
[rebalance:debug,2014-08-19T16:50:06.647,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.648,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1927.1>,#Ref<16550.0.1.89767>}]}
[rebalance:info,2014-08-19T16:50:06.648,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 972
[rebalance:debug,2014-08-19T16:50:06.648,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1927.1>,#Ref<16550.0.1.89767>}]
[ns_server:debug,2014-08-19T16:50:06.649,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:50:06.658,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 722. Nacking mccouch update.
[views:debug,2014-08-19T16:50:06.658,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/722. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:06.658,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",722,pending,0}
[ns_server:debug,2014-08-19T16:50:06.658,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,756,724,1003,990,974,759,743,727,1022,1006,993,977,762,
746,730,1009,996,980,765,749,733,1012,999,983,752,736,1015,986,755,739,723,
1018,1002,989,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,
732,1011,998,982,767,751,735,1014,985,754,738,722,1017,1001,988,757,741,725,
1020,1004,991,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,
1013,753,1000,987,740,1019]
[rebalance:debug,2014-08-19T16:50:06.669,ns_1@10.242.238.90:<0.21678.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 972
[ns_server:info,2014-08-19T16:50:06.675,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 718 state to replica
[ns_server:info,2014-08-19T16:50:06.682,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 718 to state replica
[ns_server:debug,2014-08-19T16:50:06.727,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_718_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.728,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[718]},
{checkpoints,[{718,0}]},
{name,<<"replication_building_718_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[718]},
{takeover,false},
{suffix,"building_718_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",718,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:06.729,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21682.0>
[rebalance:debug,2014-08-19T16:50:06.729,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.730,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.1947.1>,#Ref<16550.0.1.89909>}]}
[rebalance:info,2014-08-19T16:50:06.730,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 718
[rebalance:debug,2014-08-19T16:50:06.730,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.1947.1>,#Ref<16550.0.1.89909>}]
[ns_server:debug,2014-08-19T16:50:06.731,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:50:06.731,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21683.0> (ok)
[rebalance:debug,2014-08-19T16:50:06.732,ns_1@10.242.238.90:<0.21684.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 718
[views:debug,2014-08-19T16:50:06.733,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/722. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:06.733,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",722,pending,0}
[ns_server:info,2014-08-19T16:50:06.800,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 971 state to replica
[ns_server:info,2014-08-19T16:50:06.804,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 971 to state replica
[ns_server:debug,2014-08-19T16:50:06.834,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 972. Nacking mccouch update.
[views:debug,2014-08-19T16:50:06.834,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/972. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.834,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",972,replica,0}
[ns_server:debug,2014-08-19T16:50:06.834,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,756,724,1003,990,743,1022,993,977,762,746,730,1009,996,
980,765,749,733,1012,999,983,752,736,1015,986,755,739,723,1018,1002,989,758,
742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,998,982,
767,751,735,1014,985,754,738,722,1017,1001,988,972,757,741,725,1020,1004,991,
760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,1000,987,
740,1019,974,759,727,1006]
[ns_server:debug,2014-08-19T16:50:06.836,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_971_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.837,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[971]},
{checkpoints,[{971,0}]},
{name,<<"replication_building_971_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[971]},
{takeover,false},
{suffix,"building_971_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",971,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:06.838,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21702.0>
[rebalance:debug,2014-08-19T16:50:06.838,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.839,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2008.1>,#Ref<16550.0.1.90209>}]}
[rebalance:info,2014-08-19T16:50:06.839,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 971
[rebalance:debug,2014-08-19T16:50:06.840,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2008.1>,#Ref<16550.0.1.90209>}]
[ns_server:debug,2014-08-19T16:50:06.840,ns_1@10.242.238.90:<0.21701.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.858,ns_1@10.242.238.90:<0.21703.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 971
[ns_server:info,2014-08-19T16:50:06.864,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 717 state to replica
[ns_server:info,2014-08-19T16:50:06.870,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 717 to state replica
[views:debug,2014-08-19T16:50:06.892,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/972. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:06.893,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",972,replica,0}
[ns_server:debug,2014-08-19T16:50:06.916,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_717_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:06.917,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[717]},
{checkpoints,[{717,0}]},
{name,<<"replication_building_717_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[717]},
{takeover,false},
{suffix,"building_717_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",717,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:06.918,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21707.0>
[rebalance:debug,2014-08-19T16:50:06.918,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:06.919,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2028.1>,#Ref<16550.0.1.90325>}]}
[rebalance:info,2014-08-19T16:50:06.919,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 717
[rebalance:debug,2014-08-19T16:50:06.919,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2028.1>,#Ref<16550.0.1.90325>}]
[ns_server:debug,2014-08-19T16:50:06.920,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21708.0> (ok)
[ns_server:debug,2014-08-19T16:50:06.920,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:06.922,ns_1@10.242.238.90:<0.21709.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 717
[ns_server:info,2014-08-19T16:50:06.997,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 970 state to replica
[ns_server:info,2014-08-19T16:50:07.001,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 970 to state replica
[ns_server:debug,2014-08-19T16:50:07.032,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_970_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.034,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[970]},
{checkpoints,[{970,0}]},
{name,<<"replication_building_970_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[970]},
{takeover,false},
{suffix,"building_970_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",970,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:07.034,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21727.0>
[rebalance:debug,2014-08-19T16:50:07.034,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.035,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2085.1>,#Ref<16550.0.1.90612>}]}
[rebalance:info,2014-08-19T16:50:07.035,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 970
[rebalance:debug,2014-08-19T16:50:07.036,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2085.1>,#Ref<16550.0.1.90612>}]
[ns_server:debug,2014-08-19T16:50:07.037,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:50:07.042,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 720. Nacking mccouch update.
[views:debug,2014-08-19T16:50:07.042,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/720. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:07.043,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",720,pending,0}
[ns_server:debug,2014-08-19T16:50:07.043,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,984,737,1016,756,724,1003,990,743,1022,993,977,762,746,730,1009,996,
980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,989,
758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,998,
982,767,751,735,1014,985,754,738,722,1017,1001,988,972,757,741,725,1020,1004,
991,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,1000,
987,740,1019,974,759,727,1006]
[rebalance:debug,2014-08-19T16:50:07.053,ns_1@10.242.238.90:<0.21728.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 970
[ns_server:info,2014-08-19T16:50:07.059,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 716 state to replica
[ns_server:info,2014-08-19T16:50:07.065,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 716 to state replica
[views:debug,2014-08-19T16:50:07.110,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/720. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:07.110,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",720,pending,0}
[ns_server:debug,2014-08-19T16:50:07.112,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_716_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.113,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[716]},
{checkpoints,[{716,0}]},
{name,<<"replication_building_716_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[716]},
{takeover,false},
{suffix,"building_716_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",716,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:07.114,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21732.0>
[rebalance:debug,2014-08-19T16:50:07.114,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.114,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2119.1>,#Ref<16550.0.1.90754>}]}
[rebalance:info,2014-08-19T16:50:07.114,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 716
[rebalance:debug,2014-08-19T16:50:07.115,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2119.1>,#Ref<16550.0.1.90754>}]
[ns_server:debug,2014-08-19T16:50:07.115,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21733.0> (ok)
[ns_server:debug,2014-08-19T16:50:07.115,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:07.116,ns_1@10.242.238.90:<0.21734.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 716
[ns_server:info,2014-08-19T16:50:07.187,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 969 state to replica
[ns_server:info,2014-08-19T16:50:07.192,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 969 to state replica
[ns_server:debug,2014-08-19T16:50:07.223,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_969_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.225,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[969]},
{checkpoints,[{969,0}]},
{name,<<"replication_building_969_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[969]},
{takeover,false},
{suffix,"building_969_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",969,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:07.226,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21752.0>
[rebalance:debug,2014-08-19T16:50:07.226,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.226,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2167.1>,#Ref<16550.0.1.91008>}]}
[rebalance:info,2014-08-19T16:50:07.226,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 969
[rebalance:debug,2014-08-19T16:50:07.227,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2167.1>,#Ref<16550.0.1.91008>}]
[ns_server:debug,2014-08-19T16:50:07.227,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:07.248,ns_1@10.242.238.90:<0.21753.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 969
[ns_server:debug,2014-08-19T16:50:07.251,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 718. Nacking mccouch update.
[views:debug,2014-08-19T16:50:07.251,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/718. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:07.251,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",718,pending,0}
[ns_server:debug,2014-08-19T16:50:07.252,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,756,724,1003,990,743,1022,993,977,762,746,730,1009,
996,980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,
989,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,722,1017,1001,988,972,757,741,725,1020,
1004,991,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,753,
1000,987,740,1019,974,759,727,1006]
[ns_server:info,2014-08-19T16:50:07.254,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 715 state to replica
[ns_server:info,2014-08-19T16:50:07.260,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 715 to state replica
[ns_server:debug,2014-08-19T16:50:07.308,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_715_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.309,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[715]},
{checkpoints,[{715,0}]},
{name,<<"replication_building_715_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[715]},
{takeover,false},
{suffix,"building_715_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",715,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:07.310,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21757.0>
[rebalance:debug,2014-08-19T16:50:07.310,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.311,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2201.1>,#Ref<16550.0.1.91174>}]}
[rebalance:info,2014-08-19T16:50:07.311,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 715
[rebalance:debug,2014-08-19T16:50:07.311,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2201.1>,#Ref<16550.0.1.91174>}]
[ns_server:debug,2014-08-19T16:50:07.312,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:50:07.312,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21758.0> (ok)
[rebalance:debug,2014-08-19T16:50:07.314,ns_1@10.242.238.90:<0.21759.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 715
[views:debug,2014-08-19T16:50:07.327,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/718. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:07.327,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",718,pending,0}
[ns_server:info,2014-08-19T16:50:07.383,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 968 state to replica
[ns_server:info,2014-08-19T16:50:07.387,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 968 to state replica
[ns_server:debug,2014-08-19T16:50:07.418,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_968_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.420,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[968]},
{checkpoints,[{968,0}]},
{name,<<"replication_building_968_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[968]},
{takeover,false},
{suffix,"building_968_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",968,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:07.421,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21769.0>
[rebalance:debug,2014-08-19T16:50:07.421,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.421,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2258.1>,#Ref<16550.0.1.91461>}]}
[rebalance:info,2014-08-19T16:50:07.422,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 968
[rebalance:debug,2014-08-19T16:50:07.422,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2258.1>,#Ref<16550.0.1.91461>}]
[ns_server:debug,2014-08-19T16:50:07.423,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:07.440,ns_1@10.242.238.90:<0.21770.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 968
[ns_server:info,2014-08-19T16:50:07.446,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 714 state to replica
[ns_server:info,2014-08-19T16:50:07.453,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 714 to state replica
[ns_server:debug,2014-08-19T16:50:07.477,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 975. Nacking mccouch update.
[views:debug,2014-08-19T16:50:07.477,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/975. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:07.477,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",975,replica,0}
[ns_server:debug,2014-08-19T16:50:07.478,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,756,724,1003,990,743,1022,993,977,762,746,730,1009,
996,980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,
989,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,722,1017,1001,988,972,757,741,725,1020,
1004,991,975,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,
753,1000,987,740,1019,974,759,727,1006]
[ns_server:debug,2014-08-19T16:50:07.500,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_714_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.502,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[714]},
{checkpoints,[{714,0}]},
{name,<<"replication_building_714_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[714]},
{takeover,false},
{suffix,"building_714_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",714,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:07.502,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21788.0>
[rebalance:debug,2014-08-19T16:50:07.502,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.503,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2278.1>,#Ref<16550.0.1.91578>}]}
[rebalance:info,2014-08-19T16:50:07.503,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 714
[rebalance:debug,2014-08-19T16:50:07.503,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2278.1>,#Ref<16550.0.1.91578>}]
[ns_server:debug,2014-08-19T16:50:07.504,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21789.0> (ok)
[ns_server:debug,2014-08-19T16:50:07.504,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:07.505,ns_1@10.242.238.90:<0.21790.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 714
[views:debug,2014-08-19T16:50:07.536,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/975. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:07.536,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",975,replica,0}
[ns_server:info,2014-08-19T16:50:07.574,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 967 state to replica
[ns_server:info,2014-08-19T16:50:07.578,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 967 to state replica
[ns_server:debug,2014-08-19T16:50:07.610,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_967_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.611,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[967]},
{checkpoints,[{967,0}]},
{name,<<"replication_building_967_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[967]},
{takeover,false},
{suffix,"building_967_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",967,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:07.612,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21799.0>
[rebalance:debug,2014-08-19T16:50:07.612,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.613,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2335.1>,#Ref<16550.0.1.92843>}]}
[rebalance:info,2014-08-19T16:50:07.613,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 967
[rebalance:debug,2014-08-19T16:50:07.614,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2335.1>,#Ref<16550.0.1.92843>}]
[ns_server:debug,2014-08-19T16:50:07.615,ns_1@10.242.238.90:<0.21793.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:07.633,ns_1@10.242.238.90:<0.21809.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 967
[ns_server:info,2014-08-19T16:50:07.640,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 713 state to replica
[ns_server:info,2014-08-19T16:50:07.646,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 713 to state replica
[ns_server:debug,2014-08-19T16:50:07.691,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_713_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.692,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[713]},
{checkpoints,[{713,0}]},
{name,<<"replication_building_713_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[713]},
{takeover,false},
{suffix,"building_713_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",713,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:07.693,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21813.0>
[rebalance:debug,2014-08-19T16:50:07.693,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.694,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2355.1>,#Ref<16550.0.1.92960>}]}
[rebalance:info,2014-08-19T16:50:07.694,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 713
[rebalance:debug,2014-08-19T16:50:07.694,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2355.1>,#Ref<16550.0.1.92960>}]
[ns_server:debug,2014-08-19T16:50:07.695,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 973. Nacking mccouch update.
[views:debug,2014-08-19T16:50:07.695,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/973. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:07.695,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",973,replica,0}
[ns_server:debug,2014-08-19T16:50:07.695,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:50:07.695,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,756,724,1003,990,743,1022,993,977,762,746,730,1009,
996,980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,
989,973,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,
1011,998,982,767,751,735,1014,985,754,738,722,1017,1001,988,972,757,741,725,
1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,
1013,753,1000,987,740,1019,974,759,727,1006]
[ns_server:debug,2014-08-19T16:50:07.696,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21814.0> (ok)
[rebalance:debug,2014-08-19T16:50:07.698,ns_1@10.242.238.90:<0.21815.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 713
[views:debug,2014-08-19T16:50:07.755,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/973. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:07.755,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",973,replica,0}
[ns_server:info,2014-08-19T16:50:07.766,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 966 state to replica
[ns_server:info,2014-08-19T16:50:07.771,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 966 to state replica
[ns_server:debug,2014-08-19T16:50:07.803,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_966_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.805,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[966]},
{checkpoints,[{966,0}]},
{name,<<"replication_building_966_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[966]},
{takeover,false},
{suffix,"building_966_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",966,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:07.805,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21820.0>
[rebalance:debug,2014-08-19T16:50:07.805,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.806,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2412.1>,#Ref<16550.0.1.93221>}]}
[rebalance:info,2014-08-19T16:50:07.806,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 966
[rebalance:debug,2014-08-19T16:50:07.807,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2412.1>,#Ref<16550.0.1.93221>}]
[ns_server:debug,2014-08-19T16:50:07.807,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:07.825,ns_1@10.242.238.90:<0.21821.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 966
[ns_server:info,2014-08-19T16:50:07.831,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 712 state to replica
[ns_server:info,2014-08-19T16:50:07.837,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 712 to state replica
[ns_server:debug,2014-08-19T16:50:07.883,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_712_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:07.885,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[712]},
{checkpoints,[{712,0}]},
{name,<<"replication_building_712_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[712]},
{takeover,false},
{suffix,"building_712_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",712,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:07.886,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21839.0>
[rebalance:debug,2014-08-19T16:50:07.886,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:07.886,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2432.1>,#Ref<16550.0.1.93318>}]}
[rebalance:info,2014-08-19T16:50:07.886,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 712
[rebalance:debug,2014-08-19T16:50:07.887,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2432.1>,#Ref<16550.0.1.93318>}]
[ns_server:debug,2014-08-19T16:50:07.888,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21840.0> (ok)
[ns_server:debug,2014-08-19T16:50:07.888,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:07.889,ns_1@10.242.238.90:<0.21841.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 712
[ns_server:debug,2014-08-19T16:50:07.918,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 971. Nacking mccouch update.
[views:debug,2014-08-19T16:50:07.918,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/971. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:07.918,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",971,replica,0}
[ns_server:debug,2014-08-19T16:50:07.918,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,996,
980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,989,
973,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,754,738,722,1017,1001,988,972,757,741,725,1020,
1004,991,975,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,1013,
753,1000,987,740,1019,974,759,727,1006,993,746]
[views:debug,2014-08-19T16:50:07.952,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/971. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:07.952,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",971,replica,0}
[ns_server:info,2014-08-19T16:50:07.963,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 965 state to replica
[ns_server:info,2014-08-19T16:50:07.967,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 965 to state replica
[ns_server:debug,2014-08-19T16:50:07.998,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_965_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.000,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[965]},
{checkpoints,[{965,0}]},
{name,<<"replication_building_965_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[965]},
{takeover,false},
{suffix,"building_965_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",965,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:08.001,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21845.0>
[rebalance:debug,2014-08-19T16:50:08.001,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.001,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2516.1>,#Ref<16550.0.1.94087>}]}
[rebalance:info,2014-08-19T16:50:08.001,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 965
[rebalance:debug,2014-08-19T16:50:08.002,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2516.1>,#Ref<16550.0.1.94087>}]
[ns_server:debug,2014-08-19T16:50:08.003,ns_1@10.242.238.90:<0.21844.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[ns_server:debug,2014-08-19T16:50:08.035,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 969. Nacking mccouch update.
[views:debug,2014-08-19T16:50:08.035,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/969. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:08.036,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",969,replica,0}
[ns_server:debug,2014-08-19T16:50:08.036,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,996,
980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,989,
973,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,
1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,
1013,753,1000,987,740,1019,974,759,727,1006,993,746]
[rebalance:debug,2014-08-19T16:50:08.037,ns_1@10.242.238.90:<0.21860.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 965
[views:debug,2014-08-19T16:50:08.069,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/969. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:08.069,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",969,replica,0}
[ns_server:info,2014-08-19T16:50:08.079,ns_1@10.242.238.90:<0.18786.0>:ns_memcached:do_handle_call:527]Changed vbucket 711 state to replica
[ns_server:info,2014-08-19T16:50:08.085,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 711 to state replica
[ns_server:debug,2014-08-19T16:50:08.131,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_711_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.132,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[711]},
{checkpoints,[{711,0}]},
{name,<<"replication_building_711_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[711]},
{takeover,false},
{suffix,"building_711_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",711,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:08.132,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21864.0>
[rebalance:debug,2014-08-19T16:50:08.133,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.133,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2550.1>,#Ref<16550.0.1.94268>}]}
[rebalance:info,2014-08-19T16:50:08.133,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 711
[rebalance:debug,2014-08-19T16:50:08.134,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2550.1>,#Ref<16550.0.1.94268>}]
[ns_server:debug,2014-08-19T16:50:08.134,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21865.0> (ok)
[ns_server:debug,2014-08-19T16:50:08.134,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.136,ns_1@10.242.238.90:<0.21866.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 711
[ns_server:info,2014-08-19T16:50:08.206,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 964 state to replica
[ns_server:info,2014-08-19T16:50:08.211,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 964 to state replica
[ns_server:debug,2014-08-19T16:50:08.236,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 721. Nacking mccouch update.
[views:debug,2014-08-19T16:50:08.236,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/721. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.237,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",721,pending,0}
[ns_server:debug,2014-08-19T16:50:08.237,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,996,
980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,989,
973,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,
1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,734,
1013,753,721,1000,987,740,1019,974,759,727,1006,993,746]
[ns_server:debug,2014-08-19T16:50:08.243,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_964_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.244,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[964]},
{checkpoints,[{964,0}]},
{name,<<"replication_building_964_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[964]},
{takeover,false},
{suffix,"building_964_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",964,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:08.245,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21884.0>
[rebalance:debug,2014-08-19T16:50:08.245,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.245,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2593.1>,#Ref<16550.0.1.94481>}]}
[rebalance:info,2014-08-19T16:50:08.246,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 964
[rebalance:debug,2014-08-19T16:50:08.246,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2593.1>,#Ref<16550.0.1.94481>}]
[ns_server:debug,2014-08-19T16:50:08.247,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.263,ns_1@10.242.238.90:<0.21885.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 964
[ns_server:info,2014-08-19T16:50:08.269,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 710 state to replica
[views:debug,2014-08-19T16:50:08.270,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/721. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.270,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",721,pending,0}
[ns_server:info,2014-08-19T16:50:08.275,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 710 to state replica
[ns_server:debug,2014-08-19T16:50:08.320,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_710_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.321,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[710]},
{checkpoints,[{710,0}]},
{name,<<"replication_building_710_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[710]},
{takeover,false},
{suffix,"building_710_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",710,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:08.322,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21895.0>
[rebalance:debug,2014-08-19T16:50:08.322,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.323,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2627.1>,#Ref<16550.0.1.94623>}]}
[rebalance:info,2014-08-19T16:50:08.323,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 710
[rebalance:debug,2014-08-19T16:50:08.323,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2627.1>,#Ref<16550.0.1.94623>}]
[ns_server:debug,2014-08-19T16:50:08.324,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21903.0> (ok)
[ns_server:debug,2014-08-19T16:50:08.324,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.325,ns_1@10.242.238.90:<0.21905.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 710
[ns_server:debug,2014-08-19T16:50:08.387,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 719. Nacking mccouch update.
[views:debug,2014-08-19T16:50:08.387,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/719. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.387,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",719,pending,0}
[ns_server:debug,2014-08-19T16:50:08.387,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,996,
980,765,749,733,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,989,
973,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,
998,982,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,
725,1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,1010,981,766,
734,1013,753,721,1000,987,740,1019,974,759,727,1006,993,746]
[ns_server:info,2014-08-19T16:50:08.394,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 963 state to replica
[ns_server:info,2014-08-19T16:50:08.398,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 963 to state replica
[ns_server:debug,2014-08-19T16:50:08.430,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_963_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.432,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[963]},
{checkpoints,[{963,0}]},
{name,<<"replication_building_963_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[963]},
{takeover,false},
{suffix,"building_963_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",963,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:08.433,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21919.0>
[rebalance:debug,2014-08-19T16:50:08.433,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.434,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2671.1>,#Ref<16550.0.1.94911>}]}
[rebalance:info,2014-08-19T16:50:08.434,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 963
[rebalance:debug,2014-08-19T16:50:08.434,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2671.1>,#Ref<16550.0.1.94911>}]
[ns_server:debug,2014-08-19T16:50:08.435,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[views:debug,2014-08-19T16:50:08.438,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/719. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.438,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",719,pending,0}
[rebalance:debug,2014-08-19T16:50:08.452,ns_1@10.242.238.90:<0.21920.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 963
[ns_server:info,2014-08-19T16:50:08.458,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 709 state to replica
[ns_server:info,2014-08-19T16:50:08.464,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 709 to state replica
[ns_server:debug,2014-08-19T16:50:08.510,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_709_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.511,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[709]},
{checkpoints,[{709,0}]},
{name,<<"replication_building_709_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[709]},
{takeover,false},
{suffix,"building_709_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",709,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:08.512,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21938.0>
[rebalance:debug,2014-08-19T16:50:08.512,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.513,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2705.1>,#Ref<16550.0.1.95058>}]}
[rebalance:info,2014-08-19T16:50:08.513,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 709
[rebalance:debug,2014-08-19T16:50:08.513,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2705.1>,#Ref<16550.0.1.95058>}]
[ns_server:debug,2014-08-19T16:50:08.514,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21939.0> (ok)
[ns_server:debug,2014-08-19T16:50:08.514,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.515,ns_1@10.242.238.90:<0.21940.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 709
[ns_server:debug,2014-08-19T16:50:08.555,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 717. Nacking mccouch update.
[views:debug,2014-08-19T16:50:08.555,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/717. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.555,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",717,pending,0}
[ns_server:debug,2014-08-19T16:50:08.555,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,996,
980,765,749,733,717,1012,999,983,752,736,720,1015,986,755,739,723,1018,1002,
989,973,758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,
1011,998,982,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,
741,725,1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,1010,981,
766,734,1013,753,721,1000,987,740,1019,974,759,727,1006,993,746]
[ns_server:info,2014-08-19T16:50:08.589,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 962 state to replica
[ns_server:info,2014-08-19T16:50:08.593,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 962 to state replica
[views:debug,2014-08-19T16:50:08.622,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/717. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.622,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",717,pending,0}
[ns_server:debug,2014-08-19T16:50:08.626,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_962_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.628,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[962]},
{checkpoints,[{962,0}]},
{name,<<"replication_building_962_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[962]},
{takeover,false},
{suffix,"building_962_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",962,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:08.628,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21944.0>
[rebalance:debug,2014-08-19T16:50:08.628,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.629,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2762.1>,#Ref<16550.0.1.95342>}]}
[rebalance:info,2014-08-19T16:50:08.629,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 962
[rebalance:debug,2014-08-19T16:50:08.630,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2762.1>,#Ref<16550.0.1.95342>}]
[ns_server:debug,2014-08-19T16:50:08.630,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.647,ns_1@10.242.238.90:<0.21945.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 962
[ns_server:info,2014-08-19T16:50:08.652,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 708 state to replica
[ns_server:info,2014-08-19T16:50:08.661,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 708 to state replica
[ns_server:debug,2014-08-19T16:50:08.707,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_708_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.708,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[708]},
{checkpoints,[{708,0}]},
{name,<<"replication_building_708_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[708]},
{takeover,false},
{suffix,"building_708_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",708,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:08.709,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21963.0>
[rebalance:debug,2014-08-19T16:50:08.709,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.710,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2782.1>,#Ref<16550.0.1.95482>}]}
[rebalance:info,2014-08-19T16:50:08.710,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 708
[rebalance:debug,2014-08-19T16:50:08.710,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2782.1>,#Ref<16550.0.1.95482>}]
[ns_server:debug,2014-08-19T16:50:08.711,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21964.0> (ok)
[ns_server:debug,2014-08-19T16:50:08.711,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.713,ns_1@10.242.238.90:<0.21965.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 708
[ns_server:debug,2014-08-19T16:50:08.722,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 715. Nacking mccouch update.
[views:debug,2014-08-19T16:50:08.722,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/715. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.722,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",715,pending,0}
[ns_server:debug,2014-08-19T16:50:08.723,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,996,
749,717,999,983,752,736,720,1015,986,755,739,723,1018,1002,989,973,758,742,
726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,998,982,767,
751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,1020,1004,
991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,734,1013,
753,721,1000,987,740,1019,974,759,727,1006,993,746,980,765,733,1012]
[ns_server:info,2014-08-19T16:50:08.781,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 961 state to replica
[ns_server:info,2014-08-19T16:50:08.785,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 961 to state replica
[views:debug,2014-08-19T16:50:08.789,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/715. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:08.789,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",715,pending,0}
[ns_server:debug,2014-08-19T16:50:08.816,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_961_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.818,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[961]},
{checkpoints,[{961,0}]},
{name,<<"replication_building_961_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[961]},
{takeover,false},
{suffix,"building_961_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",961,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:08.818,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21969.0>
[rebalance:debug,2014-08-19T16:50:08.818,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.819,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2839.1>,#Ref<16550.0.1.95769>}]}
[rebalance:info,2014-08-19T16:50:08.819,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 961
[rebalance:debug,2014-08-19T16:50:08.820,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2839.1>,#Ref<16550.0.1.95769>}]
[ns_server:debug,2014-08-19T16:50:08.821,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.841,ns_1@10.242.238.90:<0.21970.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 961
[ns_server:info,2014-08-19T16:50:08.846,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 707 state to replica
[ns_server:info,2014-08-19T16:50:08.852,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 707 to state replica
[ns_server:debug,2014-08-19T16:50:08.899,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_707_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:08.900,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[707]},
{checkpoints,[{707,0}]},
{name,<<"replication_building_707_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[707]},
{takeover,false},
{suffix,"building_707_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",707,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:08.901,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21988.0>
[rebalance:debug,2014-08-19T16:50:08.901,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:08.901,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2873.1>,#Ref<16550.0.1.95936>}]}
[rebalance:info,2014-08-19T16:50:08.902,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 707
[rebalance:debug,2014-08-19T16:50:08.902,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2873.1>,#Ref<16550.0.1.95936>}]
[ns_server:debug,2014-08-19T16:50:08.902,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21989.0> (ok)
[ns_server:debug,2014-08-19T16:50:08.903,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:08.904,ns_1@10.242.238.90:<0.21990.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 707
[ns_server:debug,2014-08-19T16:50:08.940,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 967. Nacking mccouch update.
[views:debug,2014-08-19T16:50:08.940,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/967. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:08.940,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",967,replica,0}
[ns_server:debug,2014-08-19T16:50:08.940,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,996,
749,717,999,983,967,752,736,720,1015,986,755,739,723,1018,1002,989,973,758,
742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,998,982,
767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,1020,
1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,734,
1013,753,721,1000,987,740,1019,974,759,727,1006,993,746,980,765,733,1012]
[ns_server:info,2014-08-19T16:50:08.977,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 960 state to replica
[ns_server:info,2014-08-19T16:50:08.981,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 960 to state replica
[views:debug,2014-08-19T16:50:08.991,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/967. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:08.991,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",967,replica,0}
[ns_server:debug,2014-08-19T16:50:09.015,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_960_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:09.016,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[960]},
{checkpoints,[{960,0}]},
{name,<<"replication_building_960_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[960]},
{takeover,false},
{suffix,"building_960_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",960,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,false}]}
[rebalance:debug,2014-08-19T16:50:09.017,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.21994.0>
[rebalance:debug,2014-08-19T16:50:09.017,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:09.018,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2930.1>,#Ref<16550.0.1.96202>}]}
[rebalance:info,2014-08-19T16:50:09.018,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 960
[rebalance:debug,2014-08-19T16:50:09.018,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2930.1>,#Ref<16550.0.1.96202>}]
[ns_server:debug,2014-08-19T16:50:09.019,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:09.035,ns_1@10.242.238.90:<0.21995.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 960
[ns_server:info,2014-08-19T16:50:09.040,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 706 state to replica
[ns_server:info,2014-08-19T16:50:09.046,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 706 to state replica
[ns_server:debug,2014-08-19T16:50:09.095,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_706_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:09.097,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[706]},
{checkpoints,[{706,0}]},
{name,<<"replication_building_706_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[706]},
{takeover,false},
{suffix,"building_706_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",706,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:09.098,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22013.0>
[rebalance:debug,2014-08-19T16:50:09.098,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:09.098,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.2950.1>,#Ref<16550.0.1.96339>}]}
[rebalance:info,2014-08-19T16:50:09.098,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 706
[rebalance:debug,2014-08-19T16:50:09.099,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.2950.1>,#Ref<16550.0.1.96339>}]
[ns_server:debug,2014-08-19T16:50:09.099,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22014.0> (ok)
[ns_server:debug,2014-08-19T16:50:09.100,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:09.101,ns_1@10.242.238.90:<0.22015.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 706
[ns_server:debug,2014-08-19T16:50:09.141,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 965. Nacking mccouch update.
[views:debug,2014-08-19T16:50:09.141,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/965. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:09.141,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",965,replica,0}
[ns_server:debug,2014-08-19T16:50:09.141,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,
996,749,717,999,983,967,752,736,720,1015,986,755,739,723,1018,1002,989,973,
758,742,726,1021,1005,992,976,761,745,729,1008,995,979,764,748,732,1011,998,
982,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,
1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,
734,1013,753,721,1000,987,740,1019,974,759,727,1006,993,746,980,765,733,1012]
[ns_server:info,2014-08-19T16:50:09.171,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 705 state to replica
[ns_server:info,2014-08-19T16:50:09.178,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 705 to state replica
[views:debug,2014-08-19T16:50:09.208,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/965. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:09.208,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",965,replica,0}
[ns_server:debug,2014-08-19T16:50:09.224,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_705_'ns_1@10.242.238.90'
[rebalance:info,2014-08-19T16:50:09.225,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[705]},
{checkpoints,[{705,0}]},
{name,<<"replication_building_705_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[705]},
{takeover,false},
{suffix,"building_705_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",705,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:09.226,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22019.0>
[rebalance:debug,2014-08-19T16:50:09.226,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:09.227,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.3011.1>,#Ref<16550.0.1.96601>}]}
[rebalance:info,2014-08-19T16:50:09.227,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 705
[rebalance:debug,2014-08-19T16:50:09.227,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.3011.1>,#Ref<16550.0.1.96601>}]
[ns_server:debug,2014-08-19T16:50:09.228,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22020.0> (ok)
[ns_server:debug,2014-08-19T16:50:09.228,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:09.229,ns_1@10.242.238.90:<0.22021.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 705
[ns_server:info,2014-08-19T16:50:09.297,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 704 state to replica
[ns_server:info,2014-08-19T16:50:09.303,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:init:544]Setting {"10.242.238.90",11209} vbucket 704 to state replica
[ns_server:debug,2014-08-19T16:50:09.349,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:kill_tapname:1090]killing tap named: replication_building_704_'ns_1@10.242.238.90'
[ns_server:debug,2014-08-19T16:50:09.350,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 963. Nacking mccouch update.
[views:debug,2014-08-19T16:50:09.350,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/963. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:09.350,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",963,replica,0}
[ns_server:debug,2014-08-19T16:50:09.350,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,
996,749,717,999,983,967,752,736,720,1015,986,755,739,723,1018,1002,989,973,
758,742,726,1021,1005,992,976,761,745,729,1008,995,979,963,764,748,732,1011,
998,982,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,
725,1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,
766,734,1013,753,721,1000,987,740,1019,974,759,727,1006,993,746,980,765,733,
1012]
[rebalance:info,2014-08-19T16:50:09.350,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:init:603]Starting tap stream:
[{vbuckets,[704]},
{checkpoints,[{704,0}]},
{name,<<"replication_building_704_'ns_1@10.242.238.90'">>},
{takeover,false}]
{{"10.242.238.88",11209},
{"10.242.238.90",11209},
[{vbuckets,[704]},
{takeover,false},
{suffix,"building_704_'ns_1@10.242.238.90'"},
{note_tap_stats,{replica_building,"default",704,'ns_1@10.242.238.88',
'ns_1@10.242.238.90'}},
{username,"default"},
{password,get_from_config},
{set_to_pending_state,true}]}
[rebalance:debug,2014-08-19T16:50:09.351,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22039.0>
[rebalance:debug,2014-08-19T16:50:09.351,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:do_note_tap_stats:693]Handling note_tap_stats
[rebalance:debug,2014-08-19T16:50:09.352,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:handle_call:335]Suspended had_backfill waiter
{had_backfill,undefined,undefined,[{<16550.3053.1>,#Ref<16550.0.1.96812>}]}
[rebalance:info,2014-08-19T16:50:09.352,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:process_upstream:1017]Initial stream for vbucket 704
[rebalance:debug,2014-08-19T16:50:09.352,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:process_downstream:981]Replied had_backfill: true to [{<16550.3053.1>,#Ref<16550.0.1.96812>}]
[ns_server:debug,2014-08-19T16:50:09.353,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22040.0> (ok)
[ns_server:debug,2014-08-19T16:50:09.353,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:process_upstream:1031]seen backfill-close message
[rebalance:debug,2014-08-19T16:50:09.355,ns_1@10.242.238.90:<0.22041.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 704
[views:debug,2014-08-19T16:50:09.413,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/963. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:09.413,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",963,replica,0}
[ns_server:debug,2014-08-19T16:50:09.481,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 961. Nacking mccouch update.
[views:debug,2014-08-19T16:50:09.481,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/961. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:09.481,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",961,replica,0}
[ns_server:debug,2014-08-19T16:50:09.482,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,
996,749,717,999,983,967,752,736,720,1015,986,755,739,723,1018,1002,989,973,
758,742,726,1021,1005,992,976,761,745,729,1008,995,979,963,764,748,732,1011,
998,982,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,
725,1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,
766,734,1013,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,765,
733,1012]
[views:debug,2014-08-19T16:50:09.515,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/961. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:09.515,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",961,replica,0}
[ns_server:debug,2014-08-19T16:50:09.674,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 713. Nacking mccouch update.
[views:debug,2014-08-19T16:50:09.674,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/713. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:09.674,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",713,pending,0}
[ns_server:debug,2014-08-19T16:50:09.674,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,1022,977,762,730,1009,
996,749,717,983,736,1015,986,755,739,723,1018,1002,989,973,758,742,726,1021,
1005,992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,982,767,
751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,1020,1004,
991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,734,1013,
753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,765,733,1012,999,
967,752,720]
[views:debug,2014-08-19T16:50:09.741,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/713. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:09.741,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",713,pending,0}
[ns_server:debug,2014-08-19T16:50:09.874,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 711. Nacking mccouch update.
[views:debug,2014-08-19T16:50:09.875,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/711. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:09.875,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",711,pending,0}
[ns_server:debug,2014-08-19T16:50:09.875,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,749,717,983,736,1015,986,755,739,723,1018,1002,989,973,758,742,726,
1021,1005,992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,982,
767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,1020,
1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,734,
1013,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,765,733,1012,
999,967,752,720]
[views:debug,2014-08-19T16:50:09.942,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/711. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:09.942,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",711,pending,0}
[ns_server:debug,2014-08-19T16:50:10.100,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 709. Nacking mccouch update.
[views:debug,2014-08-19T16:50:10.100,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/709. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:10.100,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",709,pending,0}
[ns_server:debug,2014-08-19T16:50:10.101,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,749,717,983,736,1015,986,755,739,723,1018,1002,989,973,758,742,726,
1021,1005,992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,982,
767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,
1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,
734,1013,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,765,733,
1012,999,967,752,720]
[views:debug,2014-08-19T16:50:10.167,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/709. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:10.168,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",709,pending,0}
[ns_server:debug,2014-08-19T16:50:10.242,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 970. Nacking mccouch update.
[views:debug,2014-08-19T16:50:10.242,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/970. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.242,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",970,replica,0}
[ns_server:debug,2014-08-19T16:50:10.242,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,749,717,983,736,1015,986,970,755,739,723,1018,1002,989,973,758,742,
726,1021,1005,992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,
982,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,
709,1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,
766,734,1013,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,765,
733,1012,999,967,752,720]
[views:debug,2014-08-19T16:50:10.276,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/970. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.276,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",970,replica,0}
[ns_server:debug,2014-08-19T16:50:10.360,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 968. Nacking mccouch update.
[views:debug,2014-08-19T16:50:10.360,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/968. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.360,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",968,replica,0}
[ns_server:debug,2014-08-19T16:50:10.360,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,749,717,983,736,1015,986,970,755,739,723,1018,1002,989,973,758,742,
726,1021,1005,992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,
982,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,
709,1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,
766,734,1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,
765,733,1012,999,967,752,720]
[views:debug,2014-08-19T16:50:10.393,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/968. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.393,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",968,replica,0}
[ns_server:debug,2014-08-19T16:50:10.477,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 966. Nacking mccouch update.
[views:debug,2014-08-19T16:50:10.477,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/966. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.477,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",966,replica,0}
[ns_server:debug,2014-08-19T16:50:10.477,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,749,717,983,736,1015,970,755,723,1002,989,973,758,742,726,1021,1005,
992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,982,966,767,
751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,1020,
1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,734,
1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,765,733,
1012,999,967,752,720,986,739,1018]
[views:debug,2014-08-19T16:50:10.511,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/966. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.511,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",966,replica,0}
[ns_server:debug,2014-08-19T16:50:10.578,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 964. Nacking mccouch update.
[views:debug,2014-08-19T16:50:10.578,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/964. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.578,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",964,replica,0}
[ns_server:debug,2014-08-19T16:50:10.578,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,973,758,742,726,1021,
1005,992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,982,966,
767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,
1020,1004,991,975,760,744,728,1023,1007,994,978,763,747,731,715,1010,981,766,
734,1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,765,
733,1012,999,967,752,720,986,739,1018]
[views:debug,2014-08-19T16:50:10.612,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/964. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.612,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",964,replica,0}
[ns_server:debug,2014-08-19T16:50:10.692,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 962. Nacking mccouch update.
[views:debug,2014-08-19T16:50:10.693,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/962. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.693,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",962,replica,0}
[ns_server:debug,2014-08-19T16:50:10.693,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,973,758,742,726,1021,
1005,992,976,761,745,729,713,1008,995,979,963,764,748,732,1011,998,982,966,
767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,
1020,1004,991,975,760,744,728,1023,1007,994,978,962,763,747,731,715,1010,981,
766,734,1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,980,
765,733,1012,999,967,752,720,986,739,1018]
[views:debug,2014-08-19T16:50:10.768,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/962. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.768,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",962,replica,0}
[ns_server:debug,2014-08-19T16:50:10.935,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 960. Nacking mccouch update.
[views:debug,2014-08-19T16:50:10.935,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/960. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:10.935,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",960,replica,0}
[ns_server:debug,2014-08-19T16:50:10.936,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,973,758,742,726,1021,
1005,992,976,960,761,745,729,713,1008,995,979,963,764,748,732,1011,998,982,
966,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,
709,1020,1004,991,975,760,744,728,1023,1007,994,978,962,763,747,731,715,1010,
981,766,734,1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,
980,765,733,1012,999,967,752,720,986,739,1018]
[views:debug,2014-08-19T16:50:11.011,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/960. Updated state: replica (0)
[ns_server:debug,2014-08-19T16:50:11.011,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",960,replica,0}
[ns_server:debug,2014-08-19T16:50:11.177,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 716. Nacking mccouch update.
[views:debug,2014-08-19T16:50:11.178,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/716. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.178,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",716,pending,0}
[ns_server:debug,2014-08-19T16:50:11.178,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,973,758,742,726,1021,
1005,992,976,960,761,745,729,713,1008,995,979,963,764,748,732,716,1011,998,
982,966,767,751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,
725,709,1020,1004,991,975,760,744,728,1023,1007,994,978,962,763,747,731,715,
1010,981,766,734,1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,
746,980,765,733,1012,999,967,752,720,986,739,1018]
[views:debug,2014-08-19T16:50:11.245,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/716. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.245,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",716,pending,0}
[ns_server:debug,2014-08-19T16:50:11.378,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 714. Nacking mccouch update.
[views:debug,2014-08-19T16:50:11.378,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/714. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.378,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",714,pending,0}
[ns_server:debug,2014-08-19T16:50:11.379,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,742,1021,992,976,960,
761,745,729,713,1008,995,979,963,764,748,732,716,1011,998,982,966,767,751,
735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,1020,1004,
991,975,760,744,728,1023,1007,994,978,962,763,747,731,715,1010,981,766,734,
1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,714,980,765,
733,1012,999,967,752,720,986,739,1018,973,758,726,1005]
[views:debug,2014-08-19T16:50:11.496,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/714. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.496,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",714,pending,0}
[ns_server:debug,2014-08-19T16:50:11.621,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 712. Nacking mccouch update.
[views:debug,2014-08-19T16:50:11.621,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/712. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.621,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",712,pending,0}
[ns_server:debug,2014-08-19T16:50:11.621,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,742,1021,992,976,960,
761,745,729,713,1008,995,979,963,764,748,732,716,1011,998,982,966,767,751,
735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,1020,1004,
991,975,760,744,728,712,1023,1007,994,978,962,763,747,731,715,1010,981,766,
734,1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,714,980,
765,733,1012,999,967,752,720,986,739,1018,973,758,726,1005]
[views:debug,2014-08-19T16:50:11.696,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/712. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.697,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",712,pending,0}
[ns_server:debug,2014-08-19T16:50:11.764,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 710. Nacking mccouch update.
[views:debug,2014-08-19T16:50:11.764,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/710. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.764,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",710,pending,0}
[ns_server:debug,2014-08-19T16:50:11.764,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,742,710,1021,992,976,
960,761,745,729,713,1008,995,979,963,764,748,732,716,1011,998,982,966,767,
751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,1020,
1004,991,975,760,744,728,712,1023,1007,994,978,962,763,747,731,715,1010,981,
766,734,1013,968,753,721,1000,987,740,1019,974,759,727,1006,993,961,746,714,
980,765,733,1012,999,967,752,720,986,739,1018,973,758,726,1005]
[views:debug,2014-08-19T16:50:11.798,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/710. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.798,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",710,pending,0}
[ns_server:debug,2014-08-19T16:50:11.865,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 708. Nacking mccouch update.
[views:debug,2014-08-19T16:50:11.865,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/708. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.865,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",708,pending,0}
[ns_server:debug,2014-08-19T16:50:11.865,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,742,710,1021,992,976,
960,761,745,729,713,1008,995,979,963,764,748,732,716,1011,998,982,966,767,
751,735,719,1014,985,969,754,738,722,1017,1001,988,972,757,741,725,709,1020,
1004,991,975,760,744,728,712,1023,1007,994,978,962,763,747,731,715,1010,981,
766,734,1013,968,753,721,1000,987,740,708,1019,974,759,727,1006,993,961,746,
714,980,765,733,1012,999,967,752,720,986,739,1018,973,758,726,1005]
[views:debug,2014-08-19T16:50:11.899,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/708. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.899,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",708,pending,0}
[ns_server:debug,2014-08-19T16:50:11.965,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 706. Nacking mccouch update.
[views:debug,2014-08-19T16:50:11.966,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/706. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:11.966,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",706,pending,0}
[ns_server:debug,2014-08-19T16:50:11.966,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,1015,970,755,723,1002,989,742,710,1021,992,976,
960,761,745,729,713,1008,995,979,963,764,748,732,716,1011,998,982,966,767,
751,735,719,1014,985,969,754,738,722,706,1017,1001,988,972,757,741,725,709,
1020,1004,991,975,760,744,728,712,1023,1007,994,978,962,763,747,731,715,1010,
981,766,734,1013,968,753,721,1000,987,740,708,1019,974,759,727,1006,993,961,
746,714,980,765,733,1012,999,967,752,720,986,739,1018,973,758,726,1005]
[views:debug,2014-08-19T16:50:12.000,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/706. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:12.000,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",706,pending,0}
[ns_server:debug,2014-08-19T16:50:12.066,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 704. Nacking mccouch update.
[views:debug,2014-08-19T16:50:12.066,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/704. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:12.066,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",704,pending,0}
[ns_server:debug,2014-08-19T16:50:12.067,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,704,1015,970,755,723,1002,989,742,710,1021,976,
761,729,1008,995,979,963,764,748,732,716,1011,998,982,966,767,751,735,719,
1014,985,969,754,738,722,706,1017,1001,988,972,757,741,725,709,1020,1004,991,
975,760,744,728,712,1023,1007,994,978,962,763,747,731,715,1010,981,766,734,
1013,968,753,721,1000,987,740,708,1019,974,759,727,1006,993,961,746,714,980,
765,733,1012,999,967,752,720,986,739,1018,973,758,726,1005,992,960,745,713]
[views:debug,2014-08-19T16:50:12.125,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/704. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:12.125,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",704,pending,0}
[ns_server:debug,2014-08-19T16:50:12.238,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 707. Nacking mccouch update.
[views:debug,2014-08-19T16:50:12.238,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/707. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:12.238,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",707,pending,0}
[ns_server:debug,2014-08-19T16:50:12.239,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,1016,971,756,724,1003,990,743,711,1022,977,762,730,
1009,996,964,749,717,983,736,704,1015,970,755,723,1002,989,742,710,1021,976,
761,729,1008,995,979,963,764,748,732,716,1011,998,982,966,767,751,735,719,
1014,985,969,754,738,722,706,1017,1001,988,972,757,741,725,709,1020,1004,991,
975,760,744,728,712,1023,1007,994,978,962,763,747,731,715,1010,981,766,734,
1013,968,753,721,1000,987,740,708,1019,974,759,727,1006,993,961,746,714,980,
765,733,1012,999,967,752,720,986,739,707,1018,973,758,726,1005,992,960,745,
713]
[views:debug,2014-08-19T16:50:12.314,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/707. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:12.314,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",707,pending,0}
[ns_server:debug,2014-08-19T16:50:12.464,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:110]Added _local/vbuuid document into vb: 705. Nacking mccouch update.
[views:debug,2014-08-19T16:50:12.464,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/705. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:12.464,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",705,pending,0}
[ns_server:debug,2014-08-19T16:50:12.464,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:387]Usable vbuckets:
[997,965,750,718,984,737,705,1016,971,756,724,1003,990,743,711,1022,977,762,
730,1009,996,964,749,717,983,736,704,1015,970,755,723,1002,989,742,710,1021,
976,761,729,1008,995,979,963,764,748,732,716,1011,998,982,966,767,751,735,
719,1014,985,969,754,738,722,706,1017,1001,988,972,757,741,725,709,1020,1004,
991,975,760,744,728,712,1023,1007,994,978,962,763,747,731,715,1010,981,766,
734,1013,968,753,721,1000,987,740,708,1019,974,759,727,1006,993,961,746,714,
980,765,733,1012,999,967,752,720,986,739,707,1018,973,758,726,1005,992,960,
745,713]
[views:debug,2014-08-19T16:50:12.533,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/705. Updated state: pending (0)
[ns_server:debug,2014-08-19T16:50:12.533,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",705,pending,0}
[rebalance:debug,2014-08-19T16:50:12.534,ns_1@10.242.238.90:<0.21905.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:12.535,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21905.0> (ok)
[rebalance:debug,2014-08-19T16:50:12.610,ns_1@10.242.238.90:<0.21841.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:12.610,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21841.0> (ok)
[rebalance:debug,2014-08-19T16:50:12.610,ns_1@10.242.238.90:<0.22021.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:12.610,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22021.0> (ok)
[rebalance:debug,2014-08-19T16:50:12.733,ns_1@10.242.238.90:<0.21790.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:12.733,ns_1@10.242.238.90:<0.21990.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:12.733,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21790.0> (ok)
[ns_server:debug,2014-08-19T16:50:12.733,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21990.0> (ok)
[rebalance:debug,2014-08-19T16:50:12.867,ns_1@10.242.238.90:<0.21734.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:12.867,ns_1@10.242.238.90:<0.21940.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:12.867,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21734.0> (ok)
[ns_server:debug,2014-08-19T16:50:12.867,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21940.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.001,ns_1@10.242.238.90:<0.21684.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.001,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21684.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.001,ns_1@10.242.238.90:<0.21866.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.001,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21866.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.126,ns_1@10.242.238.90:<0.21633.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:13.126,ns_1@10.242.238.90:<0.21815.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.126,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21633.0> (ok)
[ns_server:debug,2014-08-19T16:50:13.126,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21815.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.242,ns_1@10.242.238.90:<0.21759.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:13.242,ns_1@10.242.238.90:<0.21583.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.243,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21759.0> (ok)
[ns_server:debug,2014-08-19T16:50:13.243,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21583.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.334,ns_1@10.242.238.90:<0.21519.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:13.335,ns_1@10.242.238.90:<0.21709.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.335,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21519.0> (ok)
[ns_server:debug,2014-08-19T16:50:13.335,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21709.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.468,ns_1@10.242.238.90:<0.21945.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:13.468,ns_1@10.242.238.90:<0.21659.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.468,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21945.0> (ok)
[ns_server:debug,2014-08-19T16:50:13.469,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21659.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.536,ns_1@10.242.238.90:<0.21885.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:13.536,ns_1@10.242.238.90:<0.21608.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.536,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21885.0> (ok)
[ns_server:debug,2014-08-19T16:50:13.536,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21608.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.628,ns_1@10.242.238.90:<0.21821.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.628,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21821.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.628,ns_1@10.242.238.90:<0.21558.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.628,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21558.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.737,ns_1@10.242.238.90:<0.21770.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.737,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21770.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.737,ns_1@10.242.238.90:<0.21970.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.737,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21970.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.829,ns_1@10.242.238.90:<0.21728.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:13.829,ns_1@10.242.238.90:<0.21920.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.829,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21728.0> (ok)
[ns_server:debug,2014-08-19T16:50:13.829,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21920.0> (ok)
[rebalance:debug,2014-08-19T16:50:13.921,ns_1@10.242.238.90:<0.21860.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:13.921,ns_1@10.242.238.90:<0.21678.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:13.921,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21860.0> (ok)
[ns_server:debug,2014-08-19T16:50:13.921,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21678.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.035,ns_1@10.242.238.90:<0.21809.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:14.035,ns_1@10.242.238.90:<0.21613.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.035,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21809.0> (ok)
[ns_server:debug,2014-08-19T16:50:14.035,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21613.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.160,ns_1@10.242.238.90:<0.21563.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:14.161,ns_1@10.242.238.90:<0.21753.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.161,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21563.0> (ok)
[ns_server:debug,2014-08-19T16:50:14.161,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21753.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.261,ns_1@10.242.238.90:<0.21513.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:14.261,ns_1@10.242.238.90:<0.21703.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.261,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21513.0> (ok)
[ns_server:debug,2014-08-19T16:50:14.261,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21703.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.378,ns_1@10.242.238.90:<0.21653.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.378,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21653.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.428,ns_1@10.242.238.90:<0.21588.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.428,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21588.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.478,ns_1@10.242.238.90:<0.21538.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.478,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21538.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.529,ns_1@10.242.238.90:<0.22041.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.529,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22041.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.579,ns_1@10.242.238.90:<0.22015.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.579,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22015.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.646,ns_1@10.242.238.90:<0.21965.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.646,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21965.0> (ok)
[rebalance:debug,2014-08-19T16:50:14.721,ns_1@10.242.238.90:<0.21995.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:14.721,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.21995.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.210,ns_1@10.242.238.90:<0.22348.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 718
[rebalance:debug,2014-08-19T16:50:16.211,ns_1@10.242.238.90:<0.22348.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.212,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22348.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.331,ns_1@10.242.238.90:<0.22351.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 720
[rebalance:debug,2014-08-19T16:50:16.331,ns_1@10.242.238.90:<0.22354.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 719
[rebalance:debug,2014-08-19T16:50:16.332,ns_1@10.242.238.90:<0.22351.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.333,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22351.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.333,ns_1@10.242.238.90:<0.22354.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.333,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22354.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.415,ns_1@10.242.238.90:<0.22357.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 722
[rebalance:debug,2014-08-19T16:50:16.415,ns_1@10.242.238.90:<0.22360.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 721
[rebalance:debug,2014-08-19T16:50:16.416,ns_1@10.242.238.90:<0.22357.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.416,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22357.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.416,ns_1@10.242.238.90:<0.22360.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.417,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22360.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.515,ns_1@10.242.238.90:<0.22363.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 724
[rebalance:debug,2014-08-19T16:50:16.515,ns_1@10.242.238.90:<0.22366.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 723
[rebalance:debug,2014-08-19T16:50:16.517,ns_1@10.242.238.90:<0.22363.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.517,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22363.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.517,ns_1@10.242.238.90:<0.22366.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.517,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22366.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.599,ns_1@10.242.238.90:<0.22369.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 725
[rebalance:debug,2014-08-19T16:50:16.599,ns_1@10.242.238.90:<0.22372.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 972
[rebalance:debug,2014-08-19T16:50:16.600,ns_1@10.242.238.90:<0.22372.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.600,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22372.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.600,ns_1@10.242.238.90:<0.22369.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.601,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22369.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.666,ns_1@10.242.238.90:<0.22375.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 974
[rebalance:debug,2014-08-19T16:50:16.666,ns_1@10.242.238.90:<0.22378.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 973
[rebalance:debug,2014-08-19T16:50:16.667,ns_1@10.242.238.90:<0.22375.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.667,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22375.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.668,ns_1@10.242.238.90:<0.22378.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.668,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22378.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.750,ns_1@10.242.238.90:<0.22381.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 976
[rebalance:debug,2014-08-19T16:50:16.750,ns_1@10.242.238.90:<0.22384.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 975
[rebalance:debug,2014-08-19T16:50:16.751,ns_1@10.242.238.90:<0.22381.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.751,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22381.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.752,ns_1@10.242.238.90:<0.22384.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.752,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22384.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.842,ns_1@10.242.238.90:<0.22387.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 978
[rebalance:debug,2014-08-19T16:50:16.842,ns_1@10.242.238.90:<0.22390.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 977
[rebalance:debug,2014-08-19T16:50:16.843,ns_1@10.242.238.90:<0.22387.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.843,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22387.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.844,ns_1@10.242.238.90:<0.22390.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.844,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22390.0> (ok)
[rebalance:debug,2014-08-19T16:50:16.958,ns_1@10.242.238.90:<0.22393.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 979
[rebalance:debug,2014-08-19T16:50:16.959,ns_1@10.242.238.90:<0.22393.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:16.959,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22393.0> (ok)
[ns_server:info,2014-08-19T16:50:17.497,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:do_pull:341]Pulling config from: 'ns_1@10.242.238.91'
[ns_server:debug,2014-08-19T16:50:17.959,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:17.967,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7418 us
[ns_server:debug,2014-08-19T16:50:17.967,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:17.968,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:17.969,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{464,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
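The buckets -> [{configs, ...}] term logged above is an ordinary Erlang proplist: one {BucketName, Props} pair per bucket, with Props holding the per-vbucket map entries ({VBucket, OldChain, NewChain}) alongside scalar settings such as ram_quota and num_vbuckets. A minimal sketch of pulling those fields out with stdlib calls, assuming the logged term is bound to Buckets (the module and function names below are illustrative, not part of ns_server):

    -module(bucket_cfg_sketch).
    -export([bucket_summary/2]).

    %% Extract the map entries and a couple of scalar settings from a logged
    %% bucket config term of the form [{configs, [{BucketName, Props}]}].
    bucket_summary(Buckets, BucketName) ->
        {configs, Configs} = lists:keyfind(configs, 1, Buckets),
        {BucketName, Props} = lists:keyfind(BucketName, 1, Configs),
        Map      = proplists:get_value(map, Props, []),
        RamQuota = proplists:get_value(ram_quota, Props),
        NumVBs   = proplists:get_value(num_vbuckets, Props),
        {Map, RamQuota, NumVBs}.

    %% e.g. bucket_cfg_sketch:bucket_summary(Buckets, "default") would return
    %% {[{464, ['ns_1@10.242.238.88', undefined],
    %%          ['ns_1@10.242.238.89', 'ns_1@10.242.238.91']}],
    %%  13369344000, 1024} for the config change logged above.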
[rebalance:debug,2014-08-19T16:50:17.972,ns_1@10.242.238.90:<0.22406.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 704
[rebalance:debug,2014-08-19T16:50:17.973,ns_1@10.242.238.90:<0.22406.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:17.973,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22406.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.001,ns_1@10.242.238.90:<0.22409.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 706
[ns_server:debug,2014-08-19T16:50:18.001,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:18.003,ns_1@10.242.238.90:<0.22409.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.003,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22409.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.006,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.006,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4622 us
[ns_server:debug,2014-08-19T16:50:18.006,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.007,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{466,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:18.045,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:18.046,ns_1@10.242.238.90:<0.22413.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 705
[rebalance:debug,2014-08-19T16:50:18.046,ns_1@10.242.238.90:<0.22416.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 708
[rebalance:debug,2014-08-19T16:50:18.047,ns_1@10.242.238.90:<0.22416.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.047,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22416.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.048,ns_1@10.242.238.90:<0.22413.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.048,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22413.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.048,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3214 us
[ns_server:debug,2014-08-19T16:50:18.048,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.049,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.049,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{465,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:18.065,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.069,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.070,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4758 us
[ns_server:debug,2014-08-19T16:50:18.070,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.071,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{467,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:18.085,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.088,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2501 us
[ns_server:debug,2014-08-19T16:50:18.088,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.088,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.089,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{469,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:18.115,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.119,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.119,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3814 us
[ns_server:debug,2014-08-19T16:50:18.120,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.121,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{468,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:18.145,ns_1@10.242.238.90:<0.22422.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 710
[rebalance:debug,2014-08-19T16:50:18.145,ns_1@10.242.238.90:<0.22423.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 707
[rebalance:debug,2014-08-19T16:50:18.147,ns_1@10.242.238.90:<0.22423.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:18.147,ns_1@10.242.238.90:<0.22422.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.147,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22423.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.147,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22422.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.256,ns_1@10.242.238.90:<0.22428.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 712
[rebalance:debug,2014-08-19T16:50:18.256,ns_1@10.242.238.90:<0.22431.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 709
[rebalance:debug,2014-08-19T16:50:18.257,ns_1@10.242.238.90:<0.22428.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.257,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22428.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.258,ns_1@10.242.238.90:<0.22431.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.258,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22431.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.338,ns_1@10.242.238.90:<0.22437.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 714
[rebalance:debug,2014-08-19T16:50:18.338,ns_1@10.242.238.90:<0.22440.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 711
[rebalance:debug,2014-08-19T16:50:18.339,ns_1@10.242.238.90:<0.22437.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.339,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22437.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.339,ns_1@10.242.238.90:<0.22440.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.339,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22440.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.405,ns_1@10.242.238.90:<0.22448.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 713
[rebalance:debug,2014-08-19T16:50:18.405,ns_1@10.242.238.90:<0.22451.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 716
[rebalance:debug,2014-08-19T16:50:18.406,ns_1@10.242.238.90:<0.22451.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.406,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22451.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.406,ns_1@10.242.238.90:<0.22448.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.407,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22448.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.486,ns_1@10.242.238.90:<0.22458.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 718)
[ns_server:debug,2014-08-19T16:50:18.486,ns_1@10.242.238.90:<0.22458.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.486,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22457.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.487,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:18.487,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:18.487,ns_1@10.242.238.90:<0.22459.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:18.487,ns_1@10.242.238.90:<0.22459.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:18.487,ns_1@10.242.238.90:<0.21681.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:18.488,ns_1@10.242.238.90:<0.22460.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 715
[rebalance:debug,2014-08-19T16:50:18.489,ns_1@10.242.238.90:<0.22460.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.489,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22460.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.519,ns_1@10.242.238.90:<0.22464.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 720)
[ns_server:debug,2014-08-19T16:50:18.520,ns_1@10.242.238.90:<0.22464.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.520,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22463.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.520,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:18.521,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:18.521,ns_1@10.242.238.90:<0.22465.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:18.521,ns_1@10.242.238.90:<0.22465.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:50:18.521,ns_1@10.242.238.90:<0.22466.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 717
[rebalance:info,2014-08-19T16:50:18.521,ns_1@10.242.238.90:<0.21616.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:18.522,ns_1@10.242.238.90:<0.22466.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.522,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22466.0> (ok)
[ns_server:info,2014-08-19T16:50:18.537,ns_1@10.242.238.90:<0.18787.0>:ns_memcached:do_handle_call:527]Changed vbucket 718 state to active
[ns_server:debug,2014-08-19T16:50:18.559,ns_1@10.242.238.90:<0.22470.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 722)
[ns_server:debug,2014-08-19T16:50:18.559,ns_1@10.242.238.90:<0.22470.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.559,ns_1@10.242.238.90:<0.22472.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 721)
[ns_server:debug,2014-08-19T16:50:18.559,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22469.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.559,ns_1@10.242.238.90:<0.22472.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.559,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22471.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.559,ns_1@10.242.238.90:<0.22474.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 724)
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:<0.22474.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:<0.22477.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 723)
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22473.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:<0.22477.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:<0.22478.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 719)
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22475.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:<0.22478.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.560,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22476.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.22480.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.22479.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.22480.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:18.561,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22479.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22483.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22482.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.21541.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22481.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.21566.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22482.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22483.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22481.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.21516.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.21656.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.21591.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:18.562,ns_1@10.242.238.90:<0.22484.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 960
[ns_server:debug,2014-08-19T16:50:18.564,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:18.571,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 720 state to active
[ns_server:debug,2014-08-19T16:50:18.572,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7886 us
[ns_server:debug,2014-08-19T16:50:18.572,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.573,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.573,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{718,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:18.599,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.599,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.600,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 9 us
[ns_server:debug,2014-08-19T16:50:18.600,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.600,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{720,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:18.605,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/718. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.605,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",718,active,1}
[rebalance:debug,2014-08-19T16:50:18.606,ns_1@10.242.238.90:<0.22484.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.606,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22484.0> (ok)
[ns_server:info,2014-08-19T16:50:18.610,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 723 state to active
[ns_server:info,2014-08-19T16:50:18.631,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 719 state to active
[ns_server:debug,2014-08-19T16:50:18.632,ns_1@10.242.238.90:<0.22490.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 725)
[ns_server:debug,2014-08-19T16:50:18.632,ns_1@10.242.238.90:<0.22490.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:18.632,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22489.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.633,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:18.633,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:18.634,ns_1@10.242.238.90:<0.22491.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 962
[ns_server:debug,2014-08-19T16:50:18.634,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:18.634,ns_1@10.242.238.90:<0.22492.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:18.634,ns_1@10.242.238.90:<0.22492.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:18.634,ns_1@10.242.238.90:<0.21485.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:18.636,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.637,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3067 us
[ns_server:debug,2014-08-19T16:50:18.637,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.638,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{723,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:18.642,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 721 state to active
[ns_server:debug,2014-08-19T16:50:18.654,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.658,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3275 us
[ns_server:debug,2014-08-19T16:50:18.658,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.658,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.659,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{719,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:18.665,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 722 state to active
[ns_server:debug,2014-08-19T16:50:18.679,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.681,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1128 us
[ns_server:debug,2014-08-19T16:50:18.681,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[views:debug,2014-08-19T16:50:18.681,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/720. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.681,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.681,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",720,active,1}
[ns_server:debug,2014-08-19T16:50:18.682,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{721,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:18.682,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 724 state to active
[rebalance:debug,2014-08-19T16:50:18.684,ns_1@10.242.238.90:<0.22497.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 964
[rebalance:debug,2014-08-19T16:50:18.684,ns_1@10.242.238.90:<0.22500.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 961
[ns_server:info,2014-08-19T16:50:18.698,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 725 state to active
[ns_server:debug,2014-08-19T16:50:18.703,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.707,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.707,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3143 us
[ns_server:debug,2014-08-19T16:50:18.707,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.708,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{722,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:18.721,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/723. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.721,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",723,active,1}
[ns_server:debug,2014-08-19T16:50:18.724,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:18.732,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 8170 us
[ns_server:debug,2014-08-19T16:50:18.732,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.733,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.733,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{724,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:18.750,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[views:debug,2014-08-19T16:50:18.755,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/721. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.755,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",721,active,1}
[ns_server:debug,2014-08-19T16:50:18.755,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.755,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4917 us
[ns_server:debug,2014-08-19T16:50:18.756,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:18.756,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{725,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:18.788,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/719. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.788,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",719,active,1}
[rebalance:debug,2014-08-19T16:50:18.808,ns_1@10.242.238.90:<0.22506.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 966
[rebalance:debug,2014-08-19T16:50:18.808,ns_1@10.242.238.90:<0.22507.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 963
[views:debug,2014-08-19T16:50:18.839,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/725. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.839,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",725,active,1}
[views:debug,2014-08-19T16:50:18.897,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/724. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.897,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",724,active,1}
[rebalance:debug,2014-08-19T16:50:18.910,ns_1@10.242.238.90:<0.22512.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 968
[rebalance:debug,2014-08-19T16:50:18.910,ns_1@10.242.238.90:<0.22515.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 965
[views:debug,2014-08-19T16:50:18.932,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/722. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:18.932,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",722,active,1}
[rebalance:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:<0.22491.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:<0.22500.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22491.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:<0.22507.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:<0.22497.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22500.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:<0.22506.0>:janitor_agent:handle_call:795]Done
[rebalance:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:<0.22515.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22507.0> (ok)
[rebalance:debug,2014-08-19T16:50:18.933,ns_1@10.242.238.90:<0.22512.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:18.934,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22497.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.934,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22506.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.934,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22515.0> (ok)
[ns_server:debug,2014-08-19T16:50:18.934,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22512.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.025,ns_1@10.242.238.90:<0.22518.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 970
[rebalance:debug,2014-08-19T16:50:19.026,ns_1@10.242.238.90:<0.22521.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 967
[rebalance:debug,2014-08-19T16:50:19.027,ns_1@10.242.238.90:<0.22518.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:19.027,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22518.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.027,ns_1@10.242.238.90:<0.22521.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:19.027,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22521.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.142,ns_1@10.242.238.90:<0.22524.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 969
[rebalance:debug,2014-08-19T16:50:19.143,ns_1@10.242.238.90:<0.22524.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:19.143,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22524.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.187,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.188,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.188,ns_1@10.242.238.90:<0.22527.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.188,ns_1@10.242.238.90:<0.22527.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.188,ns_1@10.242.238.90:<0.21676.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:19.192,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 972 state to replica
[ns_server:info,2014-08-19T16:50:19.192,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[972,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,
998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([972], [])
[ns_server:debug,2014-08-19T16:50:19.193,ns_1@10.242.238.90:<0.22528.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[972,980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.249014>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[972,980,981,982,983,984,985,986,987,988,989,990,991,992,993,
994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.193,ns_1@10.242.238.90:<0.22528.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.21456.0>
[ns_server:info,2014-08-19T16:50:19.194,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:19.207,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{972,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
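The filter above is a list of {VBucketId, CheckpointId} pairs, all at checkpoint 1 in this case; the vbucket ids on their own fall out of a simple list comprehension. A minimal sketch in an Erlang shell (the Filter binding is illustrative, rebuilt from the values printed above):

    1> Filter = [{972, 1} | [{VB, 1} || VB <- lists:seq(980, 1023)]],
       [VB || {VB, _Checkpoint} <- Filter].

which yields the vbucket ids in the same order as the filter logged above.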
[ns_server:info,2014-08-19T16:50:19.208,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.208,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.208,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.22530.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.22530.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.21456.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.209,ns_1@10.242.238.90:<0.22528.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.21456.0>
[ns_server:debug,2014-08-19T16:50:19.210,ns_1@10.242.238.90:<0.22528.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.210,ns_1@10.242.238.90:<0.22532.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.210,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.21456.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.21457.0>,<<"cut off">>,<<"cut off">>,[],136,false,false,0,
{1408,452619,208725},
completed,
{<0.22528.0>,#Ref<0.0.0.249027>},
<<"replication_ns_1@10.242.238.90">>,<0.21456.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.210,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22528.0>,{#Ref<0.0.0.249016>,<0.22532.0>}}
[rebalance:debug,2014-08-19T16:50:19.211,ns_1@10.242.238.90:<0.22533.0>:janitor_agent:handle_call:793]Going to wait for persistence of checkpoint 1 in vbucket 971
[error_logger:info,2014-08-19T16:50:19.210,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22532.0>},
{name,
{new_child_id,
[972,980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[972,980,981,982,983,984,985,986,987,988,
989,990,991,992,993,994,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[rebalance:debug,2014-08-19T16:50:19.212,ns_1@10.242.238.90:<0.22533.0>:janitor_agent:handle_call:795]Done
[ns_server:debug,2014-08-19T16:50:19.213,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22533.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.217,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.220,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2359 us
[ns_server:debug,2014-08-19T16:50:19.220,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.220,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.221,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{972,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.223,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[972,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,
995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:19.224,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22537.0>
[rebalance:debug,2014-08-19T16:50:19.236,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.236,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.236,ns_1@10.242.238.90:<0.22538.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.237,ns_1@10.242.238.90:<0.22538.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.237,ns_1@10.242.238.90:<0.21611.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:19.240,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 974 state to replica
[ns_server:info,2014-08-19T16:50:19.240,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[972,974,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,
997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([974], [])
[ns_server:debug,2014-08-19T16:50:19.241,ns_1@10.242.238.90:<0.22539.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[972,974,980,981,982,983,984,985,986,987,988,
989,990,991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.249180>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[972,974,980,981,982,983,984,985,986,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.241,ns_1@10.242.238.90:<0.22539.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22532.0>
[ns_server:info,2014-08-19T16:50:19.241,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:19.250,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{972,1},
{974,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22541.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.251,ns_1@10.242.238.90:<0.22541.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.252,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.252,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.252,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.252,ns_1@10.242.238.90:<0.22532.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.252,ns_1@10.242.238.90:<0.22539.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22532.0>
[ns_server:debug,2014-08-19T16:50:19.252,ns_1@10.242.238.90:<0.22539.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.252,ns_1@10.242.238.90:<0.22543.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.253,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22532.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22537.0>,<<"cut off">>,<<"cut off">>,[],139,false,false,0,
{1408,452619,251395},
completed,
{<0.22539.0>,#Ref<0.0.0.249193>},
<<"replication_ns_1@10.242.238.90">>,<0.22532.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.253,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22539.0>,{#Ref<0.0.0.249182>,<0.22543.0>}}
[error_logger:info,2014-08-19T16:50:19.253,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22543.0>},
{name,
{new_child_id,
[972,974,980,981,982,983,984,985,986,987,988,
989,990,991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[972,974,980,981,982,983,984,985,986,987,
988,989,990,991,992,993,994,995,996,997,
998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:19.257,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.261,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.261,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3632 us
[ns_server:debug,2014-08-19T16:50:19.261,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[972,974,980,981,982,983,984,985,986,987,988,989,990,991,992,993,
994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:19.261,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22545.0>
[ns_server:debug,2014-08-19T16:50:19.261,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.262,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{974,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:19.291,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.292,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.292,ns_1@10.242.238.90:<0.22546.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.292,ns_1@10.242.238.90:<0.22546.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.292,ns_1@10.242.238.90:<0.21561.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:19.295,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 976 state to replica
[ns_server:info,2014-08-19T16:50:19.296,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[972,974,976,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,
996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([976], [])
[ns_server:debug,2014-08-19T16:50:19.297,ns_1@10.242.238.90:<0.22547.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[972,974,976,980,981,982,983,984,985,986,987,
988,989,990,991,992,993,994,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.249340>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[972,974,976,980,981,982,983,984,985,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.297,ns_1@10.242.238.90:<0.22547.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22543.0>
[ns_server:info,2014-08-19T16:50:19.297,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[rebalance:debug,2014-08-19T16:50:19.308,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.308,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.308,ns_1@10.242.238.90:<0.22549.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.308,ns_1@10.242.238.90:<0.22549.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.308,ns_1@10.242.238.90:<0.21636.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:19.311,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{972,1},
{974,1},
{976,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:19.312,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.312,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.312,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22550.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22550.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22543.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.313,ns_1@10.242.238.90:<0.22547.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22543.0>
[ns_server:debug,2014-08-19T16:50:19.314,ns_1@10.242.238.90:<0.22547.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.314,ns_1@10.242.238.90:<0.22552.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.314,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22543.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22545.0>,<<"cut off">>,<<"cut off">>,[],142,false,false,0,
{1408,452619,312758},
completed,
{<0.22547.0>,#Ref<0.0.0.249353>},
<<"replication_ns_1@10.242.238.90">>,<0.22543.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.314,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22547.0>,{#Ref<0.0.0.249342>,<0.22552.0>}}
[error_logger:info,2014-08-19T16:50:19.314,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22552.0>},
{name,
{new_child_id,
[972,974,976,980,981,982,983,984,985,986,987,
988,989,990,991,992,993,994,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[972,974,976,980,981,982,983,984,985,986,
987,988,989,990,991,992,993,994,995,996,
997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:19.319,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.324,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.326,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{976,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.326,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[972,974,976,980,981,982,983,984,985,986,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:19.326,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22554.0>
[ns_server:debug,2014-08-19T16:50:19.326,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 6853 us
[ns_server:debug,2014-08-19T16:50:19.326,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:50:19.331,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 973 state to replica
[ns_server:info,2014-08-19T16:50:19.331,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[972,973,974,976,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,
995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([973], [])
[ns_server:debug,2014-08-19T16:50:19.332,ns_1@10.242.238.90:<0.22555.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[972,973,974,976,980,981,982,983,984,985,986,
987,988,989,990,991,992,993,994,995,996,997,
998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.249489>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[972,973,974,976,980,981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.332,ns_1@10.242.238.90:<0.22555.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22552.0>
[ns_server:info,2014-08-19T16:50:19.332,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:19.341,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{972,1},
{973,1},
{974,1},
{976,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:19.341,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22557.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22557.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.342,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.343,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.343,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.343,ns_1@10.242.238.90:<0.22552.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.343,ns_1@10.242.238.90:<0.22555.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22552.0>
[ns_server:debug,2014-08-19T16:50:19.343,ns_1@10.242.238.90:<0.22555.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.343,ns_1@10.242.238.90:<0.22559.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.343,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22552.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22554.0>,<<"cut off">>,<<"cut off">>,[],145,false,false,0,
{1408,452619,342151},
completed,
{<0.22555.0>,#Ref<0.0.0.249502>},
<<"replication_ns_1@10.242.238.90">>,<0.22552.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.344,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22555.0>,{#Ref<0.0.0.249491>,<0.22559.0>}}
[error_logger:info,2014-08-19T16:50:19.344,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22559.0>},
{name,
{new_child_id,
[972,973,974,976,980,981,982,983,984,985,986,
987,988,989,990,991,992,993,994,995,996,997,
998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[972,973,974,976,980,981,982,983,984,985,
986,987,988,989,990,991,992,993,994,995,
996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:19.348,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.352,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[972,973,974,976,980,981,982,983,984,985,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:19.352,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22560.0>
[ns_server:debug,2014-08-19T16:50:19.353,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.353,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4642 us
[ns_server:debug,2014-08-19T16:50:19.353,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.354,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{973,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:19.433,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.433,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.433,ns_1@10.242.238.90:<0.22568.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.433,ns_1@10.242.238.90:<0.22568.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.433,ns_1@10.242.238.90:<0.21586.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:19.437,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 975 state to replica
[ns_server:info,2014-08-19T16:50:19.437,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[972,973,974,975,976,980,981,982,983,984,985,986,987,988,989,990,991,992,993,
994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([975], [])
[ns_server:debug,2014-08-19T16:50:19.438,ns_1@10.242.238.90:<0.22569.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[972,973,974,975,976,980,981,982,983,984,985,
986,987,988,989,990,991,992,993,994,995,996,
997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.249654>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[972,973,974,975,976,980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.439,ns_1@10.242.238.90:<0.22569.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22559.0>
[ns_server:info,2014-08-19T16:50:19.439,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[rebalance:debug,2014-08-19T16:50:19.448,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.449,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.449,ns_1@10.242.238.90:<0.22571.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.449,ns_1@10.242.238.90:<0.22571.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.449,ns_1@10.242.238.90:<0.21511.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:19.451,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{972,1},
{973,1},
{974,1},
{975,1},
{976,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:19.452,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22572.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22572.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.453,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.454,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.454,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.454,ns_1@10.242.238.90:<0.22559.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.454,ns_1@10.242.238.90:<0.22569.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22559.0>
[ns_server:debug,2014-08-19T16:50:19.454,ns_1@10.242.238.90:<0.22569.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.454,ns_1@10.242.238.90:<0.22574.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.454,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22559.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22560.0>,<<"cut off">>,<<"cut off">>,[],148,false,false,0,
{1408,452619,453179},
completed,
{<0.22569.0>,#Ref<0.0.0.249667>},
<<"replication_ns_1@10.242.238.90">>,<0.22559.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.455,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22569.0>,{#Ref<0.0.0.249656>,<0.22574.0>}}
[error_logger:info,2014-08-19T16:50:19.455,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22574.0>},
{name,
{new_child_id,
[972,973,974,975,976,980,981,982,983,984,985,
986,987,988,989,990,991,992,993,994,995,996,
997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[972,973,974,975,976,980,981,982,983,984,
985,986,987,988,989,990,991,992,993,994,
995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:19.459,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.464,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[972,973,974,975,976,980,981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:19.466,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22575.0>
[ns_server:debug,2014-08-19T16:50:19.467,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.467,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7734 us
[ns_server:debug,2014-08-19T16:50:19.468,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.468,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{975,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:19.470,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 978 state to replica
[ns_server:info,2014-08-19T16:50:19.470,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[972,973,974,975,976,978,980,981,982,983,984,985,986,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([978], [])
[ns_server:debug,2014-08-19T16:50:19.471,ns_1@10.242.238.90:<0.22577.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[972,973,974,975,976,978,980,981,982,983,984,
985,986,987,988,989,990,991,992,993,994,995,
996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.249810>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[972,973,974,975,976,978,980,981,982,983,984,985,986,987,988,
989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.471,ns_1@10.242.238.90:<0.22577.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22574.0>
[ns_server:info,2014-08-19T16:50:19.471,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:19.480,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{972,1},
{973,1},
{974,1},
{975,1},
{976,1},
{978,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:19.481,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.481,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.481,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.481,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22579.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22579.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22574.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.482,ns_1@10.242.238.90:<0.22577.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22574.0>
[ns_server:debug,2014-08-19T16:50:19.483,ns_1@10.242.238.90:<0.22577.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.483,ns_1@10.242.238.90:<0.22581.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.483,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22574.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22575.0>,<<"cut off">>,<<"cut off">>,[],151,false,false,0,
{1408,452619,481659},
completed,
{<0.22577.0>,#Ref<0.0.0.249823>},
<<"replication_ns_1@10.242.238.90">>,<0.22574.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.483,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22577.0>,{#Ref<0.0.0.249812>,<0.22581.0>}}
[error_logger:info,2014-08-19T16:50:19.483,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22581.0>},
{name,
{new_child_id,
[972,973,974,975,976,978,980,981,982,983,984,
985,986,987,988,989,990,991,992,993,994,995,
996,997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[972,973,974,975,976,978,980,981,982,983,
984,985,986,987,988,989,990,991,992,993,
994,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:19.489,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.492,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[972,973,974,975,976,978,980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:19.493,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22582.0>
[ns_server:debug,2014-08-19T16:50:19.495,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.495,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 6085 us
[ns_server:debug,2014-08-19T16:50:19.496,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.497,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{978,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.607,ns_1@10.242.238.90:<0.22587.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 712)
[ns_server:debug,2014-08-19T16:50:19.607,ns_1@10.242.238.90:<0.22587.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.607,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22584.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.607,ns_1@10.242.238.90:<0.22589.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 706)
[ns_server:debug,2014-08-19T16:50:19.607,ns_1@10.242.238.90:<0.22589.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.607,ns_1@10.242.238.90:<0.22593.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 707)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22593.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22585.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22586.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22596.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 708)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22596.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22599.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 710)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22588.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22599.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22601.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 704)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22590.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22601.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22603.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 709)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22591.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22603.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22604.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 705)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22604.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.22605.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 715)
[ns_server:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22592.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.608,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22605.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22606.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 713)
[rebalance:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22594.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22606.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[rebalance:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22607.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 714)
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22595.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22609.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22607.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22610.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22597.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22611.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22613.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 716)
[rebalance:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22612.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22609.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22610.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22598.0> (ok)
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22611.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22613.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.609,ns_1@10.242.238.90:<0.22612.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22614.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 711)
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21973.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21835.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21948.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21998.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22616.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22615.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22614.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22617.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22600.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22618.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22619.0>:capi_set_view_manager:do_wait_index_updated:618]References to wait: [] ("default", 717)
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22616.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22615.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22617.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22602.0> (ok)
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22038.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21888.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21923.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22619.0>:capi_set_view_manager:do_wait_index_updated:638]All refs fired
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22620.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22618.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22621.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.22018.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22620.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:janitor_agent-default<0.18778.0>:janitor_agent:handle_info:854]Got done message from subprocess: <0.22608.0> (ok)
[rebalance:debug,2014-08-19T16:50:19.610,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22621.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21756.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[rebalance:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22622.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[rebalance:info,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21812.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22622.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22623.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22624.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21787.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22623.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[ns_server:debug,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.22624.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.611,ns_1@10.242.238.90:<0.21731.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.612,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.612,ns_1@10.242.238.90:<0.22625.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[rebalance:info,2014-08-19T16:50:19.612,ns_1@10.242.238.90:<0.21863.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.612,ns_1@10.242.238.90:<0.22625.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.612,ns_1@10.242.238.90:<0.21706.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[rebalance:debug,2014-08-19T16:50:19.697,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.698,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.698,ns_1@10.242.238.90:<0.22626.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.698,ns_1@10.242.238.90:<0.22626.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.698,ns_1@10.242.238.90:<0.21536.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
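The repeating terminate/confirm pattern above follows one simple handshake: a replicator logs "Dying with reason: shutdown", a short-lived helper sends an opaque marker down the same connection, and the replicator only exits once "Got close ack!" is logged. Because the marker is queued behind every previously sent mutation, its acknowledgement proves the downstream has consumed everything that was sent before it. A minimal sketch of that idea, assuming illustrative module, process, and message names (close_ack_sketch, opaque, close_ack) rather than the actual ebucketmigrator_srv code:

    -module(close_ack_sketch).
    -export([demo/0]).

    %% The terminating replicator spawns a helper that pushes an opaque marker
    %% down the channel; once the downstream echoes it back, everything sent
    %% before the marker is known to have been received, so shutting down is safe.
    demo() ->
        Downstream = spawn(fun downstream/0),
        Replicator = self(),
        spawn(fun () ->
                      Ref = make_ref(),
                      Downstream ! {opaque, self(), Ref},   %% "Sending opaque message to confirm downstream reception"
                      receive {opaque_ack, Ref} -> ok end,  %% marker came back, delivery confirmed
                      Replicator ! close_ack
              end),
        receive close_ack -> got_close_ack end.              %% "Got close ack!" -> safe to die

    %% Stand-in for the downstream side: acknowledge the opaque marker.
    downstream() ->
        receive {opaque, From, Ref} -> From ! {opaque_ack, Ref} end.

Calling close_ack_sketch:demo() from an Erlang shell returns got_close_ack once the round trip completes.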
[ns_server:info,2014-08-19T16:50:19.702,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 977 state to replica
[ns_server:info,2014-08-19T16:50:19.702,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[972,973,974,975,976,977,978,980,981,982,983,984,985,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023] ([977], [])
[ns_server:debug,2014-08-19T16:50:19.703,ns_1@10.242.238.90:<0.22627.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[972,973,974,975,976,977,978,980,981,982,983,
984,985,986,987,988,989,990,991,992,993,994,
995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.250331>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[972,973,974,975,976,977,978,980,981,982,983,984,985,986,987,
988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.703,ns_1@10.242.238.90:<0.22627.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22581.0>
[ns_server:info,2014-08-19T16:50:19.704,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:19.717,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{972,1},
{973,1},
{974,1},
{975,1},
{976,1},
{977,1},
{978,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22629.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.718,ns_1@10.242.238.90:<0.22629.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.719,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.719,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.719,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.719,ns_1@10.242.238.90:<0.22581.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.719,ns_1@10.242.238.90:<0.22627.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22581.0>
[ns_server:debug,2014-08-19T16:50:19.719,ns_1@10.242.238.90:<0.22627.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.719,ns_1@10.242.238.90:<0.22631.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.720,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22581.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22582.0>,<<"cut off">>,<<"cut off">>,[],154,false,false,0,
{1408,452619,718296},
completed,
{<0.22627.0>,#Ref<0.0.0.250344>},
<<"replication_ns_1@10.242.238.90">>,<0.22581.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.720,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22627.0>,{#Ref<0.0.0.250333>,<0.22631.0>}}
[error_logger:info,2014-08-19T16:50:19.720,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22631.0>},
{name,
{new_child_id,
[972,973,974,975,976,977,978,980,981,982,983,
984,985,986,987,988,989,990,991,992,993,994,
995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[972,973,974,975,976,977,978,980,981,982,
983,984,985,986,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,
1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:19.727,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.729,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[972,973,974,975,976,977,978,980,981,982,983,984,985,986,987,988,
989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:19.729,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22632.0>
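The entries from 16:50:19.702 through 16:50:19.729 above trace one complete "new-style" vbucket filter change: the replication manager decides the stream from 'ns_1@10.242.238.91' should also carry vbucket 977, a coordinator registers the new child id and links to the old ebucketmigrator, the old process changes the TAP filter, silences its upstream sender, confirms the downstream, hands its state (ports, stream name, counters) to the caller and dies, and the supervisor starts a replacement that reuses the old upstream. A minimal sketch of that handover, assuming illustrative names (filter_change_sketch, get_old_state) and not the actual ns_vbm_new_sup/ebucketmigrator_srv API:

    -module(filter_change_sketch).
    -export([demo/0]).

    %% The old worker owns an "upstream" resource; the replacement asks it for
    %% its state, the old worker replies and exits ("Passed old state to caller
    %% ... Preparing to die"), and the new worker carries on with a widened
    %% vbucket list while reusing the same upstream ("Reusing old upstream").
    demo() ->
        Old = spawn(fun () -> old_worker(#{upstream => "replication_sketch",
                                           vbuckets => [972, 973, 974]}) end),
        Coordinator = self(),
        New = spawn(fun () ->
                            Old ! {get_old_state, self()},
                            receive {old_state, State} ->
                                    Coordinator ! {started, self(),
                                                   State#{vbuckets := [972, 973, 974, 977]}}
                            end
                    end),
        receive {started, New, NewState} -> {ok, New, NewState} end.

    %% Old worker: hand over the current state, then terminate.
    old_worker(State) ->
        receive
            {get_old_state, ReplyTo} ->
                ReplyTo ! {old_state, State}
        end.

The same cycle repeats below each time another vbucket (966, 962, ...) is added to the replication stream.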
[ns_server:debug,2014-08-19T16:50:19.730,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.730,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3298 us
[ns_server:debug,2014-08-19T16:50:19.730,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.731,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{977,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.754,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.757,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.758,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3765 us
[ns_server:debug,2014-08-19T16:50:19.758,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.758,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{462,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.774,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.777,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.777,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2700 us
[ns_server:debug,2014-08-19T16:50:19.777,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.778,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{452,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:19.787,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 708 state to active
[ns_server:debug,2014-08-19T16:50:19.806,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.810,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.810,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3362 us
[ns_server:debug,2014-08-19T16:50:19.810,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.811,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{448,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:19.820,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 713 state to active
[ns_server:debug,2014-08-19T16:50:19.835,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{463,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.833,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.833,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.840,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 19 us
[ns_server:debug,2014-08-19T16:50:19.841,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:50:19.864,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.864,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.864,ns_1@10.242.238.90:<0.22638.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.865,ns_1@10.242.238.90:<0.22638.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.865,ns_1@10.242.238.90:<0.21819.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[views:debug,2014-08-19T16:50:19.865,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/708. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:19.865,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",708,active,1}
[ns_server:debug,2014-08-19T16:50:19.870,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.874,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.874,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3938 us
[ns_server:debug,2014-08-19T16:50:19.874,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.875,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{450,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:19.895,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 707 state to active
[ns_server:debug,2014-08-19T16:50:19.899,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[views:debug,2014-08-19T16:50:19.900,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/713. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:19.901,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1293 us
[ns_server:debug,2014-08-19T16:50:19.901,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",713,active,1}
[ns_server:debug,2014-08-19T16:50:19.901,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.901,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.902,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{708,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.918,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.921,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.921,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2924 us
[ns_server:debug,2014-08-19T16:50:19.921,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.922,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{461,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:19.932,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/707. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:19.933,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",707,active,1}
[rebalance:debug,2014-08-19T16:50:19.937,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:19.937,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.938,ns_1@10.242.238.90:<0.22641.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.938,ns_1@10.242.238.90:<0.22641.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.938,ns_1@10.242.238.90:<0.21943.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.941,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.943,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1399 us
[ns_server:debug,2014-08-19T16:50:19.943,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.943,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.944,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{713,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.966,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:19.968,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 710 state to active
[ns_server:debug,2014-08-19T16:50:19.969,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2695 us
[ns_server:debug,2014-08-19T16:50:19.969,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:19.970,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{455,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:19.970,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:50:19.972,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 966 state to replica
[ns_server:info,2014-08-19T16:50:19.973,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[966,972,973,974,975,976,977,978,980,981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023] ([966], [])
[ns_server:debug,2014-08-19T16:50:19.974,ns_1@10.242.238.90:<0.22643.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[966,972,973,974,975,976,977,978,980,981,982,
983,984,985,986,987,988,989,990,991,992,993,
994,995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.250831>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[966,972,973,974,975,976,977,978,980,981,982,983,984,985,986,
987,988,989,990,991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:19.974,ns_1@10.242.238.90:<0.22643.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22631.0>
[ns_server:info,2014-08-19T16:50:19.974,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:19.986,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{966,1},
{972,1},
{973,1},
{974,1},
{975,1},
{976,1},
{977,1},
{978,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:19.987,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:19.987,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:19.987,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:19.988,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:19.988,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:19.988,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:19.988,ns_1@10.242.238.90:<0.22645.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:19.988,ns_1@10.242.238.90:<0.22645.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:19.988,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:19.988,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:19.989,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:19.989,ns_1@10.242.238.90:<0.22631.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:19.989,ns_1@10.242.238.90:<0.22643.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22631.0>
[ns_server:debug,2014-08-19T16:50:19.989,ns_1@10.242.238.90:<0.22643.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:19.989,ns_1@10.242.238.90:<0.22647.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:19.989,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22631.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22632.0>,<<"cut off">>,<<"cut off">>,[],157,false,false,0,
{1408,452619,987773},
completed,
{<0.22643.0>,#Ref<0.0.0.250844>},
<<"replication_ns_1@10.242.238.90">>,<0.22631.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:19.989,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22643.0>,{#Ref<0.0.0.250833>,<0.22647.0>}}
[error_logger:info,2014-08-19T16:50:19.989,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22647.0>},
{name,
{new_child_id,
[966,972,973,974,975,976,977,978,980,981,982,
983,984,985,986,987,988,989,990,991,992,993,
994,995,996,997,998,999,1000,1001,1002,1003,
1004,1005,1006,1007,1008,1009,1010,1011,1012,
1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[966,972,973,974,975,976,977,978,980,981,
982,983,984,985,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,
1009,1010,1011,1012,1013,1014,1015,1016,
1017,1018,1019,1020,1021,1022,1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:info,2014-08-19T16:50:19.990,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 704 state to active
[ns_server:debug,2014-08-19T16:50:19.996,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:19.999,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[966,972,973,974,975,976,977,978,980,981,982,983,984,985,986,987,
988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[views:debug,2014-08-19T16:50:20.000,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/710. Updated state: active (1)
[rebalance:debug,2014-08-19T16:50:20.000,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22648.0>
[ns_server:debug,2014-08-19T16:50:20.000,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",710,active,1}
[ns_server:debug,2014-08-19T16:50:20.005,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 9122 us
[ns_server:debug,2014-08-19T16:50:20.005,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.007,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{966,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:20.007,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:50:20.017,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 709 state to active
[ns_server:debug,2014-08-19T16:50:20.027,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.031,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.031,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3663 us
[ns_server:debug,2014-08-19T16:50:20.031,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.032,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{460,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:20.033,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/704. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:20.033,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",704,active,1}
[ns_server:debug,2014-08-19T16:50:20.049,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:20.051,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.051,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.051,ns_1@10.242.238.90:<0.22651.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.051,ns_1@10.242.238.90:<0.22651.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.051,ns_1@10.242.238.90:<0.21968.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.052,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2748 us
[ns_server:debug,2014-08-19T16:50:20.052,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.052,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.053,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{707,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:20.072,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.072,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.072,ns_1@10.242.238.90:<0.22653.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.073,ns_1@10.242.238.90:<0.22653.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.074,ns_1@10.242.238.90:<0.21466.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.080,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[rebalance:debug,2014-08-19T16:50:20.082,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.082,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.082,ns_1@10.242.238.90:<0.22654.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.082,ns_1@10.242.238.90:<0.22654.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.083,ns_1@10.242.238.90:<0.21916.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[views:debug,2014-08-19T16:50:20.084,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/709. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:20.084,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",709,active,1}
[ns_server:debug,2014-08-19T16:50:20.084,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.085,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4190 us
[ns_server:debug,2014-08-19T16:50:20.086,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.086,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{459,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:20.088,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 962 state to replica
[ns_server:info,2014-08-19T16:50:20.088,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[962,966,972,973,974,975,976,977,978,980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,
1022,1023] ([962], [])
[ns_server:debug,2014-08-19T16:50:20.090,ns_1@10.242.238.90:<0.22655.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[962,966,972,973,974,975,976,977,978,980,981,
982,983,984,985,986,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.251199>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[962,966,972,973,974,975,976,977,978,980,981,982,983,984,985,
986,987,988,989,990,991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:20.090,ns_1@10.242.238.90:<0.22655.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22647.0>
[ns_server:info,2014-08-19T16:50:20.091,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:20.105,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{962,1},
{966,1},
{972,1},
{973,1},
{974,1},
{975,1},
{976,1},
{977,1},
{978,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:20.106,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:20.106,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:20.106,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:20.106,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:20.106,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:20.106,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.106,ns_1@10.242.238.90:<0.22658.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22658.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22647.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22655.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22647.0>
[ns_server:debug,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22655.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:20.107,ns_1@10.242.238.90:<0.22660.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:20.108,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22647.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22648.0>,<<"cut off">>,<<"cut off">>,[],160,false,false,0,
{1408,452620,106333},
completed,
{<0.22655.0>,#Ref<0.0.0.251212>},
<<"replication_ns_1@10.242.238.90">>,<0.22647.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:20.108,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22655.0>,{#Ref<0.0.0.251201>,<0.22660.0>}}
[error_logger:info,2014-08-19T16:50:20.108,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22660.0>},
{name,
{new_child_id,
[962,966,972,973,974,975,976,977,978,980,981,
982,983,984,985,986,987,988,989,990,991,992,
993,994,995,996,997,998,999,1000,1001,1002,
1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[962,966,972,973,974,975,976,977,978,980,
981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:20.113,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.117,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[962,966,972,973,974,975,976,977,978,980,981,982,983,984,985,986,
987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[ns_server:debug,2014-08-19T16:50:20.118,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 4394 us
[rebalance:debug,2014-08-19T16:50:20.118,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22661.0>
[ns_server:debug,2014-08-19T16:50:20.118,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.118,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[rebalance:debug,2014-08-19T16:50:20.120,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.120,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{962,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:20.120,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.120,ns_1@10.242.238.90:<0.22662.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.120,ns_1@10.242.238.90:<0.22662.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.121,ns_1@10.242.238.90:<0.21993.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:20.130,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 715 state to active
[ns_server:debug,2014-08-19T16:50:20.138,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.142,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{457,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:debug,2014-08-19T16:50:20.142,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3473 us
[ns_server:debug,2014-08-19T16:50:20.144,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.145,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:info,2014-08-19T16:50:20.150,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 705 state to active
[views:debug,2014-08-19T16:50:20.158,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/715. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:20.159,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",715,active,1}
[ns_server:debug,2014-08-19T16:50:20.164,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:20.168,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 714 state to active
[ns_server:debug,2014-08-19T16:50:20.172,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 7876 us
[ns_server:debug,2014-08-19T16:50:20.173,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.173,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.174,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{710,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:20.188,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.188,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.188,ns_1@10.242.238.90:<0.22665.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.188,ns_1@10.242.238.90:<0.22665.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.189,ns_1@10.242.238.90:<0.21768.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.190,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.193,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.193,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2977 us
[ns_server:debug,2014-08-19T16:50:20.194,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.194,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{704,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[views:debug,2014-08-19T16:50:20.204,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/705. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:20.205,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",705,active,1}
[rebalance:debug,2014-08-19T16:50:20.210,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.210,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.210,ns_1@10.242.238.90:<0.22667.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.211,ns_1@10.242.238.90:<0.22667.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.211,ns_1@10.242.238.90:<0.21883.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.215,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.216,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 1141 us
[ns_server:debug,2014-08-19T16:50:20.217,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.218,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.218,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{709,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.90','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:20.220,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 716 state to active
[ns_server:debug,2014-08-19T16:50:20.235,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:info,2014-08-19T16:50:20.237,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 706 state to active
[ns_server:debug,2014-08-19T16:50:20.238,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.239,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3817 us
[ns_server:debug,2014-08-19T16:50:20.239,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.240,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{453,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:20.253,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 717 state to active
[ns_server:debug,2014-08-19T16:50:20.256,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.259,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 3007 us
[ns_server:debug,2014-08-19T16:50:20.259,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.260,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.261,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{454,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.89','ns_1@10.242.238.91']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:20.266,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 961 state to replica
[ns_server:info,2014-08-19T16:50:20.267,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[961,962,966,972,973,974,975,976,977,978,980,981,982,983,984,985,986,987,988,
989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,
1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,
1021,1022,1023] ([961], [])
[ns_server:debug,2014-08-19T16:50:20.268,ns_1@10.242.238.90:<0.22670.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[961,962,966,972,973,974,975,976,977,978,980,
981,982,983,984,985,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.251647>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[961,962,966,972,973,974,975,976,977,978,980,981,982,983,984,
985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,
1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:20.268,ns_1@10.242.238.90:<0.22670.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22660.0>
[ns_server:info,2014-08-19T16:50:20.268,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[views:debug,2014-08-19T16:50:20.271,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/714. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:20.272,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",714,active,1}
[rebalance:debug,2014-08-19T16:50:20.277,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.278,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.278,ns_1@10.242.238.90:<0.22672.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.278,ns_1@10.242.238.90:<0.22672.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.278,ns_1@10.242.238.90:<0.21751.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:info,2014-08-19T16:50:20.283,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{961,1},
{962,1},
{966,1},
{972,1},
{973,1},
{974,1},
{975,1},
{976,1},
{977,1},
{978,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:20.283,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22673.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22673.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.284,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.285,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:20.285,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:20.285,ns_1@10.242.238.90:<0.22660.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:20.285,ns_1@10.242.238.90:<0.22670.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22660.0>
[ns_server:debug,2014-08-19T16:50:20.285,ns_1@10.242.238.90:<0.22670.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:20.285,ns_1@10.242.238.90:<0.22675.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:20.285,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22660.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22661.0>,<<"cut off">>,<<"cut off">>,[],163,false,false,0,
{1408,452620,284082},
completed,
{<0.22670.0>,#Ref<0.0.0.251662>},
<<"replication_ns_1@10.242.238.90">>,<0.22660.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:20.286,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22670.0>,{#Ref<0.0.0.251649>,<0.22675.0>}}
[error_logger:info,2014-08-19T16:50:20.286,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22675.0>},
{name,
{new_child_id,
[961,962,966,972,973,974,975,976,977,978,980,
981,982,983,984,985,986,987,988,989,990,991,
992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[961,962,966,972,973,974,975,976,977,978,
980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,
1000,1001,1002,1003,1004,1005,1006,1007,
1008,1009,1010,1011,1012,1013,1014,1015,
1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[ns_server:debug,2014-08-19T16:50:20.291,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.293,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.293,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 2708 us
[ns_server:debug,2014-08-19T16:50:20.294,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.294,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[961,962,966,972,973,974,975,976,977,978,980,981,982,983,984,985,
986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[ns_server:debug,2014-08-19T16:50:20.295,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{961,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[rebalance:debug,2014-08-19T16:50:20.295,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22676.0>
[ns_server:info,2014-08-19T16:50:20.297,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 979 state to replica
[ns_server:info,2014-08-19T16:50:20.297,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[961,962,966,972,973,974,975,976,977,978,979,980,981,982,983,984,985,986,987,
988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023] ([979], [])
[ns_server:debug,2014-08-19T16:50:20.298,ns_1@10.242.238.90:<0.22678.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[961,962,966,972,973,974,975,976,977,978,979,
980,981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.251827>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun},
{on_not_ready_vbuckets,#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,[961,962,966,972,973,974,975,976,977,978,979,980,981,982,983,
984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]
[ns_server:debug,2014-08-19T16:50:20.299,ns_1@10.242.238.90:<0.22678.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:139]Linked myself to old ebucketmigrator <0.22675.0>
[ns_server:info,2014-08-19T16:50:20.299,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:handle_call:270]Starting new-style vbucket filter change on stream `replication_ns_1@10.242.238.90`
[ns_server:info,2014-08-19T16:50:20.307,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:handle_call:297]Changing vbucket filter on tap stream `replication_ns_1@10.242.238.90`:
[{961,1},
{962,1},
{966,1},
{972,1},
{973,1},
{974,1},
{975,1},
{976,1},
{977,1},
{978,1},
{979,1},
{980,1},
{981,1},
{982,1},
{983,1},
{984,1},
{985,1},
{986,1},
{987,1},
{988,1},
{989,1},
{990,1},
{991,1},
{992,1},
{993,1},
{994,1},
{995,1},
{996,1},
{997,1},
{998,1},
{999,1},
{1000,1},
{1001,1},
{1002,1},
{1003,1},
{1004,1},
{1005,1},
{1006,1},
{1007,1},
{1008,1},
{1009,1},
{1010,1},
{1011,1},
{1012,1},
{1013,1},
{1014,1},
{1015,1},
{1016,1},
{1017,1},
{1018,1},
{1019,1},
{1020,1},
{1021,1},
{1022,1},
{1023,1}]
[ns_server:info,2014-08-19T16:50:20.308,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:handle_call:307]Successfully changed vbucket filter on tap stream `replication_ns_1@10.242.238.90`.
[ns_server:info,2014-08-19T16:50:20.308,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:process_upstream:1027]Got vbucket filter change completion message. Silencing upstream sender
[ns_server:info,2014-08-19T16:50:20.308,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:handle_info:366]Got reply from upstream silencing request. Completing state transition to a new ebucketmigrator.
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:complete_native_vb_filter_change:170]Proceeding with reading unread binaries
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:confirm_downstream:197]Going to confirm reception of downstream messages
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22680.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22680.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was successfully sent
[rebalance:info,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:confirm_downstream:201]Confirmed upstream messages are fed to kernel
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:reply_and_die:210]Passed old state to caller
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22675.0>:ebucketmigrator_srv:reply_and_die:213]Sent out state. Preparing to die
[ns_server:debug,2014-08-19T16:50:20.309,ns_1@10.242.238.90:<0.22678.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:143]Got old state from previous ebucketmigrator: <0.22675.0>
[ns_server:debug,2014-08-19T16:50:20.310,ns_1@10.242.238.90:<0.22678.0>:ns_vbm_new_sup:perform_vbucket_filter_change_loop:184]Sent old state to new instance
[ns_server:info,2014-08-19T16:50:20.310,ns_1@10.242.238.90:<0.22682.0>:ns_vbm_new_sup:mk_old_state_retriever:83]Got vbucket filter change old state. Proceeding with vbucket filter change operation
[ns_server:debug,2014-08-19T16:50:20.310,ns_1@10.242.238.90:<0.22682.0>:ebucketmigrator_srv:init:494]Got old ebucketmigrator state from <0.22675.0>:
{state,#Port<0.13886>,#Port<0.13883>,#Port<0.13887>,#Port<0.13884>,
<0.22676.0>,<<"cut off">>,<<"cut off">>,[],166,false,false,0,
{1408,452620,308761},
completed,
{<0.22678.0>,#Ref<0.0.0.251840>},
<<"replication_ns_1@10.242.238.90">>,<0.22675.0>,
{had_backfill,false,undefined,[]},
completed,false}.
[ns_server:debug,2014-08-19T16:50:20.310,ns_1@10.242.238.90:<0.17162.0>:ns_process_registry:handle_info:98]Got exit msg: {'EXIT',<0.22678.0>,{#Ref<0.0.0.251829>,<0.22682.0>}}
[error_logger:info,2014-08-19T16:50:20.310,ns_1@10.242.238.90:error_logger<0.6.0>:ale_error_logger_handler:log_report:115]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.22682.0>},
{name,
{new_child_id,
[961,962,966,972,973,974,975,976,977,978,979,
980,981,982,983,984,985,986,987,988,989,990,
991,992,993,994,995,996,997,998,999,1000,1001,
1002,1003,1004,1005,1006,1007,1008,1009,1010,
1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023],
'ns_1@10.242.238.91'}},
{mfargs,
{ebucketmigrator_srv,start_link,
[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,
#Fun},
{on_not_ready_vbuckets,
#Fun},
{username,"default"},
{password,get_from_config},
{vbuckets,
[961,962,966,972,973,974,975,976,977,978,
979,980,981,982,983,984,985,986,987,988,
989,990,991,992,993,994,995,996,997,998,
999,1000,1001,1002,1003,1004,1005,1006,
1007,1008,1009,1010,1011,1012,1013,1014,
1015,1016,1017,1018,1019,1020,1021,1022,
1023]},
{set_to_pending_state,false},
{takeover,false},
{suffix,"ns_1@10.242.238.90"}]]}},
{restart_type,temporary},
{shutdown,60000},
{child_type,worker}]
[rebalance:debug,2014-08-19T16:50:20.311,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:terminate:737]Dying with reason: shutdown
[ns_server:debug,2014-08-19T16:50:20.311,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:confirm_sent_messages:805]Going to wait for reception of opaque message ack
[ns_server:debug,2014-08-19T16:50:20.312,ns_1@10.242.238.90:<0.22683.0>:ebucketmigrator_srv:confirm_sent_messages:796]Sending opaque message to confirm downstream reception
[ns_server:debug,2014-08-19T16:50:20.312,ns_1@10.242.238.90:<0.22683.0>:ebucketmigrator_srv:confirm_sent_messages:801]Opaque message was succesfully sent
[rebalance:info,2014-08-19T16:50:20.312,ns_1@10.242.238.90:<0.21726.0>:ebucketmigrator_srv:do_confirm_sent_messages:775]Got close ack!
[ns_server:debug,2014-08-19T16:50:20.315,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:113]Got full synchronization request from 'ns_1@10.242.238.88'
[ns_server:debug,2014-08-19T16:50:20.321,ns_1@10.242.238.90:<0.22682.0>:ebucketmigrator_srv:init:621]Reusing old upstream:
[{vbuckets,[961,962,966,972,973,974,975,976,977,978,979,980,981,982,983,984,
985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,
1014,1015,1016,1017,1018,1019,1020,1021,1022,1023]},
{name,<<"replication_ns_1@10.242.238.90">>},
{takeover,false}]
[rebalance:debug,2014-08-19T16:50:20.322,ns_1@10.242.238.90:<0.22682.0>:ebucketmigrator_srv:init:640]upstream_sender pid: <0.22684.0>
[views:debug,2014-08-19T16:50:20.322,ns_1@10.242.238.90:mc_couch_events<0.17533.0>:capi_set_view_manager:handle_mc_couch_event:539]Got set_vbucket event for default/716. Updated state: active (1)
[ns_server:debug,2014-08-19T16:50:20.322,ns_1@10.242.238.90:<0.18779.0>:mc_connection:do_notify_vbucket_update:126]Signaled mc_couch_event: {set_vbucket,"default",716,active,1}
[ns_server:debug,2014-08-19T16:50:20.324,ns_1@10.242.238.90:ns_config_rep<0.17414.0>:ns_config_rep:handle_call:119]Fully synchronized config in 8027 us
[ns_server:debug,2014-08-19T16:50:20.324,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.324,ns_1@10.242.238.90:capi_set_view_manager-default<0.18761.0>:capi_set_view_manager:handle_info:359]doing replicate_newnodes_docs
[ns_server:debug,2014-08-19T16:50:20.325,ns_1@10.242.238.90:ns_config_log<0.17158.0>:ns_config_log:log_common:138]config change:
buckets ->
[{configs,[{"default",
[{map,[{979,
['ns_1@10.242.238.88',undefined],
['ns_1@10.242.238.91','ns_1@10.242.238.90']}]},
{fastForwardMap,[]},
{uuid,<<"d95ae85dc319bab78fd23c50f6adae2e">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,13369344000},
{auth_type,sasl},
{flush_enabled,true},
{num_threads,3},
{type,membase},
{num_vbuckets,1024},
{servers,['ns_1@10.242.238.88','ns_1@10.242.238.89',
'ns_1@10.242.238.90','ns_1@10.242.238.91']},
{map_opts_hash,133465355}]}]}]
[ns_server:info,2014-08-19T16:50:20.327,ns_1@10.242.238.90:<0.18785.0>:ns_memcached:do_handle_call:527]Changed vbucket 963 state to replica
[ns_server:info,2014-08-19T16:50:20.327,ns_1@10.242.238.90:tap_replication_manager-default<0.18775.0>:tap_replication_manager:change_vbucket_filter:200]Going to change replication from 'ns_1@10.242.238.91' to have
[961,962,963,966,972,973,974,975,976,977,978,979,980,981,982,983,984,985,986,
987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,
1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,
1020,1021,1022,1023] ([963], [])
[ns_server:debug,2014-08-19T16:50:20.328,ns_1@10.242.238.90:<0.22685.0>:ns_vbm_new_sup:do_perform_vbucket_filter_change:135]Registered myself under id:{"default",
{new_child_id,
[961,962,963,966,972,973,974,975,976,977,978,
979,980,981,982,983,984,985,986,987,988,989,
990,991,992,993,994,995,996,997,998,999,1000,
1001,1002,1003,1004,1005,1006,1007,1008,1009,
1010,1011,1012,1013,1014,1015,1016,1017,1018,
1019,1020,1021,1022,1023],
'ns_1@10.242.238.91'},
#Ref<0.0.0.251999>}
Args:[{"10.242.238.91",11209},
{"10.242.238.90",11209},
[{old_state_retriever,#Fun