per_node_diag = [{'ns_1@10.1.2.30', [{version, [{public_key,"0.14"}, {lhttpc,"1.3.0"}, {ale,"8cffe61"}, {os_mon,"2.2.8"}, {couch_set_view,"1.2.0a-051c820-git"}, {mnesia,"4.6"}, {inets,"5.8"}, {couch,"1.2.0a-051c820-git"}, {mapreduce,"1.0.0"}, {kernel,"2.15"}, {crypto,"2.1"}, {ssl,"5.0"}, {sasl,"2.2"}, {ns_server,"2.0.0r-1065-rel-enterprise"}, {mochiweb,"1.4.1"}, {oauth,"7d85d3ef"}, {stdlib,"1.18"}]}, {manifest, ["bucket_engine 2.0.0r-1065-rel Linux-x86_64", "couchbase-examples 2.0.0r-1065-rel Linux-x86_64", "couchbase-python-client 2.0.0r-1065-rel Linux-x86_64", "couchbase-server 2.0.0r-1065-rel Linux-x86_64", "couchdb 2.0.0r-1065-rel Linux-x86_64", "couchdbx-app 2.0.0r-1065-rel Linux-x86_64", "couchstore 2.0.0r-1065-rel Linux-x86_64", "ep-engine 2.0.0r-1065-rel Linux-x86_64", "geocouch 2.0.0r-1065-rel Linux-x86_64", "icu4c 2.0.0r-1065-rel Linux-x86_64", "libconflate 2.0.0r-1065-rel Linux-x86_64", "libcouchbase 2.0.0r-1065-rel Linux-x86_64", "libmemcached 2.0.0r-1065-rel Linux-x86_64", "libvbucket 2.0.0r-1065-rel Linux-x86_64", "manifest 2.0.0r-1065-rel Linux-x86_64", "manifest-master 2.0.0r-1065-rel Linux-x86_64", "mccouch 2.0.0r-1065-rel Linux-x86_64", "membase-cli 2.0.0r-1065-rel Linux-x86_64", "memcached 2.0.0r-1065-rel Linux-x86_64", "memcachetest 2.0.0r-1065-rel Linux-x86_64", "moxi 2.0.0r-1065-rel Linux-x86_64", "ns_server 2.0.0r-1065-rel Linux-x86_64", "otp 2.0.0r-1065-rel Linux-x86_64", "portsigar 2.0.0r-1065-rel Linux-x86_64", "sigar 2.0.0r-1065-rel Linux-x86_64", "snappy 2.0.0r-1065-rel Linux-x86_64", "testrunner 2.0.0r-1065-rel Linux-x86_64", "tlm 2.0.0r-1065-rel Linux-x86_64", "v8 2.0.0r-1065-rel Linux-x86_64", "workload-generator 2.0.0r-1065-rel Linux-x86_64"]}, {config, [{buckets, [{'_vclock', [{'ns_1@10.1.2.30',{266,63501326980}}, {'ns_1@127.0.0.1',{15,63501326546}}]}, {configs, [{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,1435500544}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@10.1.2.30']}]}]}]}, {memory_quota, [{'_vclock',[{'ns_1@127.0.0.1',{1,63501326372}}]}|2054]}, {auto_failover_cfg, [{'_vclock',[{'ns_1@127.0.0.1',{1,63501326352}}]}, {enabled,false}, {timeout,30}, {max_nodes,1}, {count,0}]}, {autocompaction, [{'_vclock',[{'ns_1@127.0.0.1',{1,63501326383}}]}, {parallel_db_and_view_compaction,false}, {database_fragmentation_threshold,{30,nil}}, {view_fragmentation_threshold,{30,nil}}]}, {counters, [{'_vclock',[{'ns_1@10.1.2.30',{3,63501326713}}]}, {rebalance_success,2}, {rebalance_start,2}]}, {directory, [{'_vclock',[{'ns_1@127.0.0.1',{1,63501326352}}]}, 47,111,112,116,47,99,111,117,99,104,98,97,115,101,47, 118,97,114,47,108,105,98,47,99,111,117,99,104,98,97, 115,101,47,99,111,110,102,105,103]}, {email_alerts, [{'_vclock',[{'ns_1@127.0.0.1',{1,63501326352}}]}, {recipients,["root@localhost"]}, {sender,"couchbase@localhost"}, {enabled,false}, {email_server, [{user,[]}, {pass,'filtered-out'}, {host,"localhost"}, {port,25}, {encrypt,false}]}, {alerts, [auto_failover_node,auto_failover_maximum_reached, auto_failover_other_nodes_down, auto_failover_cluster_too_small]}]}, {max_parallel_indexers,4}, {nodes_wanted, [{'_vclock',[{'ns_1@10.1.2.30',{11,63501326713}}]}, 'ns_1@10.1.2.30']}, {otp, [{'_vclock',[{'ns_1@10.1.2.35',{2,63501301120}}]}, {cookie,olcyvmepmlevmwcj}]}, {rebalance_status, [{'_vclock',[{'ns_1@10.1.2.30',{3,63501326713}}]}|none]}, {rebalancer_pid, [{'_vclock',[{'ns_1@10.1.2.30',{3,63501326713}}]}| undefined]}, {remote_clusters,[]}, {replication,[{enabled,true}]}, {rest,[{port,8091}]}, {rest_creds, 
[{'_vclock',[{'ns_1@127.0.0.1',{1,63501326356}}]}, {creds, [{"Administrator",[{password,'filtered-out'}]}]}]}, {vbucket_map_history, [{'_vclock', [{'ns_1@10.1.2.30',{2,63501326571}}, {'ns_1@127.0.0.1',{2,63501326373}}]}, {[['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], 
['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], 
['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34']], [{max_slaves,10}]}, {[['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined]], [{max_slaves,10}]}]}, {{node,'ns_1@10.1.2.30',capi_port}, [{'_vclock',[{'ns_1@10.1.2.30',{1,63501326556}}]}|8092]}, {{node,'ns_1@10.1.2.30',config_version}, [{'_vclock', [{'ns_1@10.1.2.30',{1,63501326556}}, {'ns_1@127.0.0.1',{6,63501326352}}]}, {2,0}]}, {{node,'ns_1@10.1.2.30',isasl}, [{'_vclock', [{'ns_1@10.1.2.30',{1,63501326556}}, 
{'ns_1@127.0.0.1',{1,63501326352}}]}, {path, "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@10.1.2.30',membership}, [{'_vclock',[{'ns_1@10.1.2.30',{1,63501326556}}]}| active]}, {{node,'ns_1@10.1.2.30',memcached}, [{'_vclock', [{'ns_1@10.1.2.30',{1,63501326556}}, {'ns_1@127.0.0.1',{1,63501326352}}]}, {port,11210}, {mccouch_port,11213}, {dedicated_port,11209}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine, "/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine, "/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.30',moxi}, [{'_vclock',[{'ns_1@10.1.2.30',{1,63501326556}}]}, {port,11211}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.30',ns_log}, [{'_vclock', [{'ns_1@10.1.2.30',{1,63501326556}}, {'ns_1@127.0.0.1',{1,63501326352}}]}, {filename, "/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {{node,'ns_1@10.1.2.30',port_servers}, [{'_vclock', [{'ns_1@10.1.2.30',{1,63501326556}}, {'ns_1@127.0.0.1',{1,63501326352}}]}, {moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR", {"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-l", {"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}]}, {{node,'ns_1@10.1.2.30',rest}, [{'_vclock',[{'ns_1@10.1.2.30',{1,63501326556}}]}, {port,8091}, {port_meta,global}]}, {{node,'ns_1@10.1.2.30',uuid}, [{'_vclock',[{'ns_1@10.1.2.30',{1,63501326556}}]}| <<"3fc7aa44aa0a6052e7355d4df28fdd10">>]}, {{node,'ns_1@10.1.2.31',capi_port}, [{'_vclock',[{'ns_1@10.1.2.31',{1,63501301117}}]}|8092]}, {{node,'ns_1@10.1.2.31',config_version}, [{'_vclock', [{'ns_1@10.1.2.31',{1,63501301117}}, {'ns_1@127.0.0.1',{6,63501300903}}]}, {2,0}]}, {{node,'ns_1@10.1.2.31',isasl}, [{'_vclock', [{'ns_1@10.1.2.31',{1,63501301117}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {path, "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@10.1.2.31',membership}, [{'_vclock',[{'ns_1@10.1.2.30',{2,63501326713}}]}| inactiveFailed]}, {{node,'ns_1@10.1.2.31',memcached}, [{'_vclock', [{'ns_1@10.1.2.31',{1,63501301117}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {port,11210}, {mccouch_port,11213}, {dedicated_port,11209}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, 
{bucket_engine, "/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine, "/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.31',moxi}, [{'_vclock',[{'ns_1@10.1.2.31',{1,63501301117}}]}, {port,11211}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.31',ns_log}, [{'_vclock', [{'ns_1@10.1.2.31',{1,63501301117}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {filename, "/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {{node,'ns_1@10.1.2.31',port_servers}, [{'_vclock', [{'ns_1@10.1.2.31',{1,63501301117}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR", {"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-l", {"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}]}, {{node,'ns_1@10.1.2.31',rest}, [{'_vclock',[{'ns_1@10.1.2.31',{1,63501301117}}]}, {port,8091}, {port_meta,global}]}, {{node,'ns_1@10.1.2.31',uuid}, [{'_vclock',[{'ns_1@10.1.2.31',{1,63501301117}}]}| <<"696ec4b6995be8362b8501e83adc515a">>]}, {{node,'ns_1@10.1.2.32',capi_port}, [{'_vclock',[{'ns_1@10.1.2.32',{1,63501301118}}]}|8092]}, {{node,'ns_1@10.1.2.32',config_version}, [{'_vclock', [{'ns_1@10.1.2.32',{1,63501301118}}, {'ns_1@127.0.0.1',{6,63501300900}}]}, {2,0}]}, {{node,'ns_1@10.1.2.32',isasl}, [{'_vclock', [{'ns_1@10.1.2.32',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300900}}]}, {path, "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@10.1.2.32',membership}, [{'_vclock',[{'ns_1@10.1.2.30',{2,63501326713}}]}| inactiveFailed]}, {{node,'ns_1@10.1.2.32',memcached}, [{'_vclock', [{'ns_1@10.1.2.32',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300900}}]}, {port,11210}, {mccouch_port,11213}, {dedicated_port,11209}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine, "/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine, "/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.32',moxi}, [{'_vclock',[{'ns_1@10.1.2.32',{1,63501301118}}]}, {port,11211}, {verbosity,[]}]}, 
{{node,'ns_1@10.1.2.32',ns_log}, [{'_vclock', [{'ns_1@10.1.2.32',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300900}}]}, {filename, "/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {{node,'ns_1@10.1.2.32',port_servers}, [{'_vclock', [{'ns_1@10.1.2.32',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300900}}]}, {moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR", {"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-l", {"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}]}, {{node,'ns_1@10.1.2.32',rest}, [{'_vclock',[{'ns_1@10.1.2.32',{1,63501301118}}]}, {port,8091}, {port_meta,global}]}, {{node,'ns_1@10.1.2.32',uuid}, [{'_vclock',[{'ns_1@10.1.2.32',{1,63501301118}}]}| <<"9042deff5f1a694b94a007a13784e9df">>]}, {{node,'ns_1@10.1.2.33',capi_port}, [{'_vclock',[{'ns_1@10.1.2.33',{1,63501301118}}]}|8092]}, {{node,'ns_1@10.1.2.33',config_version}, [{'_vclock', [{'ns_1@10.1.2.33',{1,63501301118}}, {'ns_1@127.0.0.1',{6,63501300902}}]}, {2,0}]}, {{node,'ns_1@10.1.2.33',isasl}, [{'_vclock', [{'ns_1@10.1.2.33',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {path, "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@10.1.2.33',membership}, [{'_vclock',[{'ns_1@10.1.2.30',{2,63501326713}}]}| inactiveFailed]}, {{node,'ns_1@10.1.2.33',memcached}, [{'_vclock', [{'ns_1@10.1.2.33',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {port,11210}, {mccouch_port,11213}, {dedicated_port,11209}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine, "/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine, "/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.33',moxi}, [{'_vclock',[{'ns_1@10.1.2.33',{1,63501301118}}]}, {port,11211}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.33',ns_log}, [{'_vclock', [{'ns_1@10.1.2.33',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {filename, "/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {{node,'ns_1@10.1.2.33',port_servers}, [{'_vclock', [{'ns_1@10.1.2.33',{1,63501301118}}, {'ns_1@127.0.0.1',{1,63501300902}}]}, {moxi,"/opt/couchbase/bin/moxi", ["-Z", 
{"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR", {"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-l", {"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}]}, {{node,'ns_1@10.1.2.33',rest}, [{'_vclock',[{'ns_1@10.1.2.33',{1,63501301118}}]}, {port,8091}, {port_meta,global}]}, {{node,'ns_1@10.1.2.33',uuid}, [{'_vclock',[{'ns_1@10.1.2.33',{1,63501301118}}]}| <<"361f8aec3bb999e4c29166160617fc05">>]}, {{node,'ns_1@10.1.2.34',capi_port}, [{'_vclock',[{'ns_1@10.1.2.34',{1,63501301119}}]}|8092]}, {{node,'ns_1@10.1.2.34',config_version}, [{'_vclock', [{'ns_1@10.1.2.34',{1,63501301119}}, {'ns_1@127.0.0.1',{6,63501300896}}]}, {2,0}]}, {{node,'ns_1@10.1.2.34',isasl}, [{'_vclock', [{'ns_1@10.1.2.34',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300896}}]}, {path, "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@10.1.2.34',membership}, [{'_vclock',[{'ns_1@10.1.2.30',{2,63501326713}}]}| inactiveFailed]}, {{node,'ns_1@10.1.2.34',memcached}, [{'_vclock', [{'ns_1@10.1.2.34',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300896}}]}, {port,11210}, {mccouch_port,11213}, {dedicated_port,11209}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine, "/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine, "/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.34',moxi}, [{'_vclock',[{'ns_1@10.1.2.34',{1,63501301119}}]}, {port,11211}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.34',ns_log}, [{'_vclock', [{'ns_1@10.1.2.34',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300896}}]}, {filename, "/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {{node,'ns_1@10.1.2.34',port_servers}, [{'_vclock', [{'ns_1@10.1.2.34',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300896}}]}, {moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, 
{"MOXI_SASL_PLAIN_USR", {"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-l", {"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", {"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}]}, {{node,'ns_1@10.1.2.34',rest}, [{'_vclock',[{'ns_1@10.1.2.34',{1,63501301119}}]}, {port,8091}, {port_meta,global}]}, {{node,'ns_1@10.1.2.34',uuid}, [{'_vclock',[{'ns_1@10.1.2.34',{1,63501301119}}]}| <<"fa4f91599e815253faba2d2fd59b4f1b">>]}, {{node,'ns_1@10.1.2.35',capi_port}, [{'_vclock',[{'ns_1@10.1.2.35',{1,63501301119}}]}|8092]}, {{node,'ns_1@10.1.2.35',config_version}, [{'_vclock', [{'ns_1@10.1.2.35',{1,63501301119}}, {'ns_1@127.0.0.1',{6,63501300899}}]}, {2,0}]}, {{node,'ns_1@10.1.2.35',isasl}, [{'_vclock', [{'ns_1@10.1.2.35',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300899}}]}, {path, "/opt/couchbase/var/lib/couchbase/data/isasl.pw"}]}, {{node,'ns_1@10.1.2.35',membership}, [{'_vclock',[{'ns_1@10.1.2.30',{2,63501326713}}]}| inactiveFailed]}, {{node,'ns_1@10.1.2.35',memcached}, [{'_vclock', [{'ns_1@10.1.2.35',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300899}}]}, {port,11210}, {mccouch_port,11213}, {dedicated_port,11209}, {dbdir,"/opt/couchbase/var/lib/couchbase/data"}, {admin_user,"_admin"}, {admin_pass,"_admin"}, {bucket_engine, "/opt/couchbase/lib/memcached/bucket_engine.so"}, {engines, [{membase, [{engine,"/opt/couchbase/lib/memcached/ep.so"}, {static_config_string, "vb0=false;waitforwarmup=false;failpartialwarmup=false"}]}, {memcached, [{engine, "/opt/couchbase/lib/memcached/default_engine.so"}, {static_config_string,"vb0=true"}]}]}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.35',moxi}, [{'_vclock',[{'ns_1@10.1.2.35',{1,63501301119}}]}, {port,11211}, {verbosity,[]}]}, {{node,'ns_1@10.1.2.35',ns_log}, [{'_vclock', [{'ns_1@10.1.2.35',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300899}}]}, {filename, "/opt/couchbase/var/lib/couchbase/data/ns_log"}]}, {{node,'ns_1@10.1.2.35',port_servers}, [{'_vclock', [{'ns_1@10.1.2.35',{1,63501301119}}, {'ns_1@127.0.0.1',{1,63501300899}}]}, {moxi,"/opt/couchbase/bin/moxi", ["-Z", {"port_listen=~B,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", [port]}, "-z", {"url=http://127.0.0.1:~B/pools/default/saslBucketsStreaming", [{misc,this_node_rest_port,[]}]}, "-p","0","-Y","y","-O","stderr", {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR", {"~s",[{ns_moxi_sup,rest_user,[]}]}}, {"MOXI_SASL_PLAIN_PWD", {"~s",[{ns_moxi_sup,rest_pass,[]}]}}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}, {memcached,"/opt/couchbase/bin/memcached", ["-X", "/opt/couchbase/lib/memcached/stdin_term_handler.so", "-l", {"0.0.0.0:~B,0.0.0.0:~B:1000",[port,dedicated_port]}, "-E","/opt/couchbase/lib/memcached/bucket_engine.so", "-B","binary","-r","-c","10000","-e", 
{"admin=~s;default_bucket_name=default;auto_create=false", [admin_user]}, {"~s",[verbosity]}], [{env, [{"EVENT_NOSELECT","1"}, {"MEMCACHED_TOP_KEYS","100"}, {"ISASL_PWFILE",{"~s",[{isasl,path}]}}, {"ISASL_DB_CHECK_TIME","1"}]}, use_stdio,stderr_to_stdout,exit_status, port_server_send_eol,stream]}]}, {{node,'ns_1@10.1.2.35',rest}, [{'_vclock',[{'ns_1@10.1.2.35',{1,63501301119}}]}, {port,8091}, {port_meta,global}]}, {{node,'ns_1@10.1.2.35',uuid}, [{'_vclock',[{'ns_1@10.1.2.35',{1,63501301119}}]}| <<"411a14e34d3708f97a6c208438f383dc">>]}, {uuid,<<"188e88555f80ca17e8a19f415bcac343">>}]}, {basic_info, [{version, [{public_key,"0.14"}, {lhttpc,"1.3.0"}, {ale,"8cffe61"}, {os_mon,"2.2.8"}, {couch_set_view,"1.2.0a-051c820-git"}, {mnesia,"4.6"}, {inets,"5.8"}, {couch,"1.2.0a-051c820-git"}, {mapreduce,"1.0.0"}, {kernel,"2.15"}, {crypto,"2.1"}, {ssl,"5.0"}, {sasl,"2.2"}, {ns_server,"2.0.0r-1065-rel-enterprise"}, {mochiweb,"1.4.1"}, {oauth,"7d85d3ef"}, {stdlib,"1.18"}]}, {system_arch,"x86_64-unknown-linux-gnu"}, {wall_clock,676}, {memory_data,{4040077312,3460702208,{<0.12.0>,4114768}}}, {disk_data, [{"/",55007284,15}, {"/boot",101086,21}, {"/dev/shm",1972692,0}]}]}, {processes, [{<0.0.0>, [{registered_name,init}, {status,waiting}, {initial_call,{otp_ring0,start,2}}, {backtrace, [<<"Program counter: 0x00002aaaab5910b8 (init:loop/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc1bcb8 Return addr 0x0000000000875c98 ()">>, <<"(0) {state,[{'-root',[<<25 bytes>>]},{'-progname',[<<3 bytes>>]},{'-home',[<<15 bytes>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,4181}, {total_heap_size,4181}, {links,[<0.6.0>,<0.7.0>,<0.3.0>]}, {memory,34360}, {message_queue_len,0}, {reductions,958666}, {trap_exit,true}]}, {<0.3.0>, [{registered_name,erl_prim_loader}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002ac394754a98 (erl_prim_loader:loop/3 + 176)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc4e488 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>, <<"(1) [\"/opt/couchbase/lib/erlang/lib/kernel-2.15/ebin\",\"/opt/couchbase/lib/erlang/lib/s">>, <<"y(2) <0.2.0>">>, <<"(3) {state,efile,[],none,#Port<0.1>,infinity,undefined,true,{prim_state,false,undefine">>, <<"y(4) infinity">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[#Port<0.1>,<0.0.0>]}, {memory,5752}, {message_queue_len,0}, {reductions,2491442}, {trap_exit,true}]}, {<0.6.0>, [{registered_name,error_logger}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae46b8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,couch_access_log,false,ok,<0.192.0>},{handler,ale_error_logger_handler,f">>, <<"y(3) error_logger">>,<<"y(4) <0.2.0>">>, <<>>, <<"0x00002aaaacae46e8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, 
{links, [<0.34.0>,<0.193.0>,<0.245.0>,<0.192.0>,<0.23.0>, <0.0.0>]}, {memory,14128}, {message_queue_len,0}, {reductions,1598114}, {trap_exit,true}]}, {<0.7.0>, [{registered_name,application_controller}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc50370 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) application_controller">>, <<"(3) {state,[],[],[],[{mnesia,<0.12916.0>},{ns_server,<0.58.0>},{mapreduce,undefined},{">>, <<"y(4) application_controller">>, <<"y(5) <0.2.0>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links, [<0.141.0>,<0.170.0>,<0.12916.0>,<0.159.0>,<0.165.0>, <0.147.0>,<0.38.0>,<0.47.0>,<0.58.0>,<0.9.0>, <0.30.0>,<0.0.0>]}, {memory,9272}, {message_queue_len,0}, {reductions,108581}, {trap_exit,true}]}, {<0.9.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394862090 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.10.0>,{appl_data,kernel,[application_controller,erl_reply,auth,boot_serv">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac3948620b0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.7.0>,<0.10.0>]}, {memory,3992}, {message_queue_len,0}, {reductions,64}, {trap_exit,true}]}, {<0.10.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948493e0 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) kernel">>, <<"y(2) <0.11.0>">>,<<"y(3) <0.9.0>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.9.0>,<0.11.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,69}, {trap_exit,true}]}, {<0.11.0>, [{registered_name,kernel_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbf8ba8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,kernel_sup},one_for_all,[{child,<0.12857.0>,net_sup_dynamic,{erl_dis">>, <<"y(4) kernel_sup">>,<<"y(5) <0.10.0>">>, <<>>, <<"0x00002aaaabbf8be0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links, [<0.20.0>,<0.24.0>,<0.25.0>,<0.12857.0>,<0.22.0>, 
<0.16.0>,<0.18.0>,<0.19.0>,<0.17.0>,<0.12.0>, <0.13.0>,<0.10.0>]}, {memory,6256}, {message_queue_len,0}, {reductions,41161}, {trap_exit,true}]}, {<0.12.0>, [{registered_name,rex}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab9dee590 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) rpc">>, <<"y(3) {1,{<0.25198.0>,{<0.25197.0>,{#Ref<0.0.2.82015>,'ns_1@10.1.2.30'}},nil,nil}}">>, <<"y(4) rex">>,<<"y(5) <0.11.0>">>,<<>>, <<"0x00002aaab9dee5c8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,514229}, {total_heap_size,514229}, {links,[<0.11.0>]}, {memory,4114912}, {message_queue_len,0}, {reductions,27415}, {trap_exit,true}]}, {<0.13.0>, [{registered_name,global_name_server}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0671e00 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) global">>, <<"y(3) {state,true,[],[],[],[],'ns_1@10.1.2.30',<0.14.0>,<0.15.0>,no_trace,false}">>, <<"y(4) global_name_server">>, <<"y(5) <0.11.0>">>,<<>>, <<"0x00002aaab0671e38 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.14.0>,<0.15.0>,<0.11.0>]}, {memory,3096}, {message_queue_len,0}, {reductions,3191}, {trap_exit,true}]}, {<0.14.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab894038 (global:loop_the_locker/1 + 768)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaad069840 Return addr 0x00002aaaab893cf8 (global:init_the_locker/1 + 328)">>, <<"y(0) {multi,[],[],[],'ns_1@10.1.2.30',false,false}">>, <<"y(1) infinity">>,<<>>, <<"0x00002aaaad069858 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.13.0>]}, {memory,2696}, {message_queue_len,0}, {reductions,1201}, {trap_exit,true}]}, {<0.15.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab89b258 (global:loop_the_registrar/0 + 24)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39489eca8 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.13.0>]}, {memory,5712}, {message_queue_len,0}, {reductions,512}, {trap_exit,false}]}, {<0.16.0>, [{registered_name,inet_db}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 
(gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394852978 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) inet_db">>, <<"(3) {state,inet_db,inet_cache,inet_hosts_byname,inet_hosts_byaddr,inet_hosts_file_byna">>, <<"y(4) inet_db">>,<<"y(5) <0.11.0>">>,<<>>, <<"0x00002ac3948529b0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.11.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,255}, {trap_exit,true}]}, {<0.17.0>, [{registered_name,global_group}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb17608 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) global_group">>, <<"y(3) {state,no_conf,true,[],[],[],[],[],'nonode@nohost',[],normal,normal}">>, <<"y(4) global_group">>,<<"y(5) <0.11.0>">>, <<>>, <<"0x00002aaaacb17640 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.11.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,166}, {trap_exit,true}]}, {<0.18.0>, [{registered_name,file_server_2}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0670ad0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) file_server">>, <<"y(3) #Port<0.54>">>, <<"y(4) file_server_2">>,<<"y(5) <0.11.0>">>, <<>>, <<"0x00002aaab0670b08 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links, [<0.11.0>,<0.405.0>,<0.23229.0>,<0.201.0>, #Port<0.54>]}, {memory,2960}, {message_queue_len,0}, {reductions,8270008}, {trap_exit,true}]}, {<0.19.0>, [{registered_name,code_server}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab967150 (code_server:loop/1 + 128)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc4d180 Return addr 0x0000000000875c98 ()">>, <<"(0) {state,<0.11.0>,\"/opt/couchbase/lib/erlang\",[\".\",\"/opt/couchbase/lib/erlang/lib/ke">>, <<"y(1) <0.11.0>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,17711}, {total_heap_size,17711}, {links,[<0.11.0>]}, {memory,142624}, {message_queue_len,0}, {reductions,652789}, {trap_exit,true}]}, {<0.20.0>, [{registered_name,standard_error_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 
264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394853828 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor_bridge">>, <<"y(3) {state,standard_error,<0.21.0>,<0.21.0>,{local,standard_error_sup}}">>, <<"y(4) standard_error_sup">>, <<"y(5) <0.11.0>">>,<<>>, <<"0x00002ac394853860 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.11.0>,<0.21.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,41}, {trap_exit,true}]}, {<0.21.0>, [{registered_name,standard_error}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab961bb0 (standard_error:server_loop/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948461f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) #Port<0.449>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.20.0>,#Port<0.449>]}, {memory,2840}, {message_queue_len,0}, {reductions,9}, {trap_exit,true}]}, {<0.22.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39485c3b8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor_bridge">>, <<"y(3) {state,user_sup,<0.23.0>,<0.23.0>,{<0.22.0>,user_sup}}">>, <<"y(4) <0.22.0>">>,<<"y(5) <0.11.0>">>,<<>>, <<"0x00002ac39485c3f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.11.0>,<0.23.0>]}, {memory,13752}, {message_queue_len,0}, {reductions,199}, {trap_exit,true}]}, {<0.23.0>, [{registered_name,user}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab992630 (user:server_loop/2 + 56)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc8fa08 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) {[],[]}">>,<<"y(3) #Port<0.458>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,4181}, {total_heap_size,4181}, {links,[<0.6.0>,<0.22.0>,#Port<0.458>]}, {memory,34464}, {message_queue_len,0}, {reductions,216166}, {trap_exit,true}]}, {<0.24.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394847c30 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) kernel_config">>,<<"y(3) []">>, <<"y(4) <0.24.0>">>,<<"y(5) <0.11.0>">>,<<>>, <<"0x00002ac394847c68 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 
0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.11.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,268}, {trap_exit,true}]}, {<0.25.0>, [{registered_name,kernel_safe_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab71e3e0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,kernel_safe_sup},one_for_one,[{child,<0.474.0>,inet_gethost_native_s">>, <<"y(4) kernel_safe_sup">>, <<"y(5) <0.11.0>">>,<<>>, <<"0x00002aaaab71e418 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links, [<0.63.0>,<0.281.0>,<0.474.0>,<0.280.0>,<0.56.0>, <0.62.0>,<0.11.0>]}, {memory,4192}, {message_queue_len,0}, {reductions,494}, {trap_exit,true}]}, {<0.30.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39485de48 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.31.0>,{appl_data,ale,[],undefined,{ale_app,[]},[ale,ale_app,ale_codegen,">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac39485de68 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.7.0>,<0.31.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,23}, {trap_exit,true}]}, {<0.31.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394847088 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) ale_app">>, <<"y(2) <0.32.0>">>,<<"y(3) <0.30.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.30.0>,<0.32.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,49}, {trap_exit,true}]}, {<0.32.0>, [{registered_name,ale_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394848388 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ale_sup},one_for_all,[{child,<0.34.0>,ale,{ale,start_link,[]},perman">>, <<"y(4) ale_sup">>,<<"y(5) <0.31.0>">>,<<>>, <<"0x00002ac3948483c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 
(proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.33.0>,<0.34.0>,<0.31.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,169}, {trap_exit,true}]}, {<0.33.0>, [{registered_name,ale_dynamic_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab715e10 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ale_dynamic_sup},one_for_one,[{child,<0.78.0>,'sink-ns_log',{ns_log_">>, <<"y(4) ale_dynamic_sup">>, <<"y(5) <0.32.0>">>,<<>>, <<"0x00002aaaab715e48 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links, [<0.69.0>,<0.75.0>,<0.78.0>,<0.72.0>,<0.61.0>, <0.66.0>,<0.32.0>]}, {memory,13952}, {message_queue_len,0}, {reductions,637}, {trap_exit,true}]}, {<0.34.0>, [{registered_name,ale}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc94ab8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ale">>, <<"(3) {state,{dict,6,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},{{[">>, <<"y(4) ale">>,<<"y(5) <0.32.0>">>,<<>>, <<"0x00002aaaabc94af0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.32.0>,<0.6.0>]}, {memory,21648}, {message_queue_len,0}, {reductions,438750}, {trap_exit,true}]}, {<0.38.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394859088 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.39.0>,{appl_data,sasl,[sasl_sup,alarm_handler,overload,release_handler],">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac3948590a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.7.0>,<0.39.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,23}, {trap_exit,true}]}, {<0.39.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394854b68 Return addr 0x0000000000875c98 ()">>, <<"y(0) 
{state,tty,{undefined,undefined,undefined}}">>, <<"y(1) sasl">>,<<"y(2) <0.40.0>">>, <<"y(3) <0.38.0>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.38.0>,<0.40.0>]}, {memory,5752}, {message_queue_len,0}, {reductions,115}, {trap_exit,true}]}, {<0.40.0>, [{registered_name,sasl_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394848ae0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,sasl_sup},one_for_one,[{child,<0.44.0>,release_handler,{release_hand">>, <<"y(4) sasl_sup">>,<<"y(5) <0.39.0>">>,<<>>, <<"0x00002ac394848b18 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.41.0>,<0.44.0>,<0.39.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,161}, {trap_exit,true}]}, {<0.41.0>, [{registered_name,sasl_safe_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948565c0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,sasl_safe_sup},one_for_one,[{child,<0.43.0>,overload,{overload,start">>, <<"y(4) sasl_safe_sup">>,<<"y(5) <0.40.0>">>, <<>>, <<"0x00002ac3948565f8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.42.0>,<0.43.0>,<0.40.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,174}, {trap_exit,true}]}, {<0.42.0>, [{registered_name,alarm_handler}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394860478 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"y(2) [{handler,alarm_handler,false,[{system_memory_high_watermark,[]}],false}]">>, <<"y(3) alarm_handler">>,<<"y(4) <0.41.0>">>, <<>>, <<"0x00002ac3948604a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.41.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,43}, {trap_exit,true}]}, {<0.43.0>, [{registered_name,overload}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948455f0 Return addr 0x00002ac394788050 
(proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) overload">>, <<"y(3) {state,0,0,8.000000e-01,857,1.000000e-01,{0,0},clear}">>, <<"y(4) overload">>,<<"y(5) <0.41.0>">>,<<>>, <<"0x00002ac394845628 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.41.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,39}, {trap_exit,false}]}, {<0.44.0>, [{registered_name,release_handler}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948584a8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) release_handler">>, <<"(3) {state,[],\"/opt/couchbase/lib/erlang\",\"/opt/couchbase/lib/erlang/releases\",[{relea">>, <<"y(4) release_handler">>, <<"y(5) <0.40.0>">>,<<>>, <<"0x00002ac3948584e0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.40.0>]}, {memory,8832}, {message_queue_len,0}, {reductions,2224}, {trap_exit,false}]}, {<0.47.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394860be0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.48.0>,{appl_data,os_mon,[os_mon_sup,os_mon_sysinfo,disksup,memsup,cpu_su">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac394860c00 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.7.0>,<0.48.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,23}, {trap_exit,true}]}, {<0.48.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948552c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) os_mon">>, <<"y(2) <0.49.0>">>,<<"y(3) <0.47.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.47.0>,<0.49.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,40}, {trap_exit,true}]}, {<0.49.0>, [{registered_name,os_mon_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39485e588 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, 
<<"(3) {state,{local,os_mon_sup},one_for_one,[{child,<0.53.0>,cpu_sup,{cpu_sup,start_link">>, <<"y(4) os_mon_sup">>,<<"y(5) <0.48.0>">>, <<>>, <<"0x00002ac39485e5c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.50.0>,<0.51.0>,<0.53.0>,<0.48.0>]}, {memory,2920}, {message_queue_len,0}, {reductions,288}, {trap_exit,true}]}, {<0.50.0>, [{registered_name,disksup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabca93d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) disksup">>, <<"(3) {state,80,60000,{unix,linux},[{\"/\",55007284,15},{\"/boot\",101086,21},{\"/dev/shm\",19">>, <<"y(4) disksup">>,<<"y(5) <0.49.0>">>,<<>>, <<"0x00002aaaabca9410 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.49.0>,#Port<0.2976>]}, {memory,2840}, {message_queue_len,0}, {reductions,16610}, {trap_exit,true}]}, {<0.51.0>, [{registered_name,memsup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbf6558 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) memsup">>, <<"(3) {state,{unix,linux},true,{3460702208,4040077312},{<0.12.0>,4114768},false,60000,30">>, <<"y(4) memsup">>,<<"y(5) <0.49.0>">>,<<>>, <<"0x00002aaaabbf6590 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.49.0>,<0.52.0>]}, {memory,13752}, {message_queue_len,0}, {reductions,205204}, {trap_exit,true}]}, {<0.52.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac13c3f0 (memsup:port_idle/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab29566d0 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) #Port<0.3017>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.51.0>,#Port<0.3017>]}, {memory,2736}, {message_queue_len,0}, {reductions,5669}, {trap_exit,true}]}, {<0.53.0>, [{registered_name,cpu_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39485f8b8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) cpu_sup">>, <<"y(3) {state,<0.54.0>,{unix,linux}}">>, <<"y(4) 
cpu_sup">>,<<"y(5) <0.49.0>">>,<<>>, <<"0x00002ac39485f8f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.49.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,33}, {trap_exit,true}]}, {<0.54.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac12d148 (cpu_sup:measurement_server_loop/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948559f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"y(4) []">>,<<"y(5) []">>, <<"y(6) []">>,<<"y(7) []">>, <<"y(8) {internal,<0.55.0>,[],{unix,linux}}">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.55.0>]}, {memory,2696}, {message_queue_len,0}, {reductions,11}, {trap_exit,true}]}, {<0.55.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac12e250 (cpu_sup:port_server_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394841f68 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) 6000">>, <<"y(2) #Port<0.3063>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.54.0>,#Port<0.3063>]}, {memory,2736}, {message_queue_len,0}, {reductions,258}, {trap_exit,false}]}, {<0.56.0>, [{registered_name,timer_server}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc10ac8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) 688">>, <<"y(2) timer">>,<<"y(3) []">>, <<"y(4) timer_server">>,<<"y(5) <0.25.0>">>, <<>>, <<"0x00002aaaabc10b00 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links, [<0.348.0>,<0.418.0>,<0.13096.0>,<0.25110.0>, <0.25192.0>,<0.12996.0>,<0.12924.0>,<0.361.0>, <0.400.0>,<0.323.0>,<0.342.0>,<0.345.0>,<0.327.0>, <0.25.0>]}, {memory,3320}, {message_queue_len,0}, {reductions,3521799}, {trap_exit,true}]}, {<0.58.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39487eb68 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.59.0>,{appl_data,ns_server,[ns_server_sup,ns_config,ns_config_sup,ns_con">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac39487eb88 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, 
{min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.7.0>,<0.59.0>]}, {memory,5856}, {message_queue_len,0}, {reductions,73}, {trap_exit,true}]}, {<0.59.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd92028 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) ns_server">>, <<"y(2) <0.138.0>">>,<<"y(3) <0.58.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.58.0>,<0.138.0>]}, {memory,13648}, {message_queue_len,0}, {reductions,11652}, {trap_exit,true}]}, {<0.61.0>, [{registered_name,'sink-disk_default'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab058a678 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ale_disk_sink">>, <<"y(3) {state,'sink-disk_default',\"/opt/couchbase/var/lib/couchbase/logs/log\"}">>, <<"y(4) 'sink-disk_default'">>, <<"y(5) <0.33.0>">>,<<>>, <<"0x00002aaab058a6b0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,4181}, {total_heap_size,4181}, {links,[<0.33.0>,<0.64.0>]}, {memory,34424}, {message_queue_len,0}, {reductions,707390}, {trap_exit,true}]}, {<0.62.0>, [{registered_name,disk_log_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafd13a58 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,disk_log_sup},simple_one_for_one,[{child,undefined,disk_log,{disk_lo">>, <<"y(4) disk_log_sup">>,<<"y(5) <0.25.0>">>, <<>>, <<"0x00002aaaafd13a90 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links, [<0.70.0>,<0.76.0>,<0.12992.0>,<0.193.0>,<0.73.0>, <0.64.0>,<0.67.0>,<0.25.0>]}, {memory,4232}, {message_queue_len,0}, {reductions,7879}, {trap_exit,true}]}, {<0.63.0>, [{registered_name,disk_log_server}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc0ca20 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) disk_log_server">>, <<"y(3) {state,[]}">>, <<"y(4) disk_log_server">>, <<"y(5) <0.25.0>">>,<<>>, <<"0x00002aaaabc0ca58 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, 
{garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links, [<0.70.0>,<0.76.0>,<0.12992.0>,<0.193.0>,<0.73.0>, <0.64.0>,<0.67.0>,<0.25.0>]}, {memory,9192}, {message_queue_len,0}, {reductions,9362}, {trap_exit,true}]}, {<0.64.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaac15d200 (disk_log:loop/1 + 168)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab05451d0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"(0) {state,[],[],<0.62.0>,<0.63.0>,8142,{arg,'sink-disk_default',2,\"/opt/couchbase/var">>, <<>>, <<"0x00002aaab05451e0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.61.0>,<0.62.0>,<0.63.0>,#Port<0.3110>]}, {memory,13832}, {message_queue_len,0}, {reductions,248633}, {trap_exit,true}]}, {<0.66.0>, [{registered_name,'sink-disk_error'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0592930 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ale_disk_sink">>, <<"y(3) {state,'sink-disk_error',\"/opt/couchbase/var/lib/couchbase/logs/errors\"}">>, <<"y(4) 'sink-disk_error'">>, <<"y(5) <0.33.0>">>,<<>>, <<"0x00002aaab0592968 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,4181}, {total_heap_size,4181}, {links,[<0.33.0>,<0.67.0>]}, {memory,34424}, {message_queue_len,0}, {reductions,58481}, {trap_exit,true}]}, {<0.67.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaac15d200 (disk_log:loop/1 + 168)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc0fc40 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"(0) {state,[],[],<0.62.0>,<0.63.0>,601,{arg,'sink-disk_error',2,\"/opt/couchbase/var/li">>, <<>>, <<"0x00002aaaabc0fc50 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.62.0>,<0.66.0>,<0.63.0>,#Port<0.3115>]}, {memory,13832}, {message_queue_len,0}, {reductions,34640}, {trap_exit,true}]}, {<0.69.0>, [{registered_name,'sink-disk_views'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab04c91d0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ale_disk_sink">>, <<"y(3) {state,'sink-disk_views',\"/opt/couchbase/var/lib/couchbase/logs/views\"}">>, <<"y(4) 'sink-disk_views'">>, 
<<"y(5) <0.33.0>">>,<<>>, <<"0x00002aaab04c9208 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.33.0>,<0.70.0>]}, {memory,13752}, {message_queue_len,0}, {reductions,504436}, {trap_exit,true}]}, {<0.70.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaac15d200 (disk_log:loop/1 + 168)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab284ab90 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"(0) {state,[],[],<0.62.0>,<0.63.0>,7536,{arg,'sink-disk_views',2,\"/opt/couchbase/var/l">>, <<>>, <<"0x00002aaab284aba0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.62.0>,<0.69.0>,<0.63.0>,#Port<0.3120>]}, {memory,21728}, {message_queue_len,0}, {reductions,216357}, {trap_exit,true}]}, {<0.72.0>, [{registered_name,'sink-disk_couchdb'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0670378 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ale_disk_sink">>, <<"y(3) {state,'sink-disk_couchdb',\"/opt/couchbase/var/lib/couchbase/logs/couchdb\"}">>, <<"y(4) 'sink-disk_couchdb'">>, <<"y(5) <0.33.0>">>,<<>>, <<"0x00002aaab06703b0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.33.0>,<0.73.0>]}, {memory,13752}, {message_queue_len,0}, {reductions,146735}, {trap_exit,true}]}, {<0.73.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaac15d200 (disk_log:loop/1 + 168)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948280e0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"(0) {state,[],[],<0.62.0>,<0.63.0>,2234,{arg,'sink-disk_couchdb',2,\"/opt/couchbase/var">>, <<>>, <<"0x00002ac3948280f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.62.0>,<0.72.0>,<0.63.0>,#Port<0.3125>]}, {memory,21728}, {message_queue_len,0}, {reductions,64523}, {trap_exit,true}]}, {<0.75.0>, [{registered_name,'sink-disk_debug'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafd0d1e8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) 
infinity">>, <<"y(2) ale_disk_sink">>, <<"y(3) {state,'sink-disk_debug',\"/opt/couchbase/var/lib/couchbase/logs/debug\"}">>, <<"y(4) 'sink-disk_debug'">>, <<"y(5) <0.33.0>">>,<<>>, <<"0x00002aaaafd0d220 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,4181}, {total_heap_size,4181}, {links,[<0.33.0>,<0.76.0>]}, {memory,34424}, {message_queue_len,0}, {reductions,4225199}, {trap_exit,true}]}, {<0.76.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaac15d200 (disk_log:loop/1 + 168)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabca5338 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"(0) {state,[],[],<0.62.0>,<0.63.0>,18668,{arg,'sink-disk_debug',2,\"/opt/couchbase/var/">>, <<>>, <<"0x00002aaaabca5348 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.62.0>,<0.75.0>,<0.63.0>,#Port<0.20455>]}, {memory,13832}, {message_queue_len,0}, {reductions,571484}, {trap_exit,true}]}, {<0.78.0>, [{registered_name,'sink-ns_log'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab17d88f8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_log_sink">>,<<"y(3) {state}">>, <<"y(4) 'sink-ns_log'">>,<<"y(5) <0.33.0>">>, <<>>, <<"0x00002aaab17d8930 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,17711}, {total_heap_size,17711}, {links,[<0.33.0>]}, {memory,142624}, {message_queue_len,0}, {reductions,2179}, {trap_exit,false}]}, {<0.138.0>, [{registered_name,ns_server_cluster_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac5836c8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ns_server_cluster_sup},one_for_one,[{child,<0.322.0>,ns_server_sup,{">>, <<"y(4) ns_server_cluster_sup">>, <<"y(5) <0.59.0>">>,<<>>, <<"0x00002aaaac583700 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,6765}, {total_heap_size,6765}, {links, [<0.247.0>,<0.255.0>,<0.306.0>,<0.322.0>,<0.256.0>, <0.254.0>,<0.139.0>,<0.246.0>,<0.59.0>]}, {memory,55376}, {message_queue_len,0}, {reductions,35018}, {trap_exit,true}]}, {<0.139.0>, [{registered_name,cb_couch_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program 
counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394897bc0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,cb_couch_sup},one_for_one,[{child,<0.176.0>,couch_app,{couch_app,sta">>, <<"y(4) cb_couch_sup">>,<<"y(5) <0.138.0>">>, <<>>, <<"0x00002ac394897bf8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.138.0>,<0.176.0>]}, {memory,5856}, {message_queue_len,0}, {reductions,2249}, {trap_exit,true}]}, {<0.141.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394862f40 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.142.0>,{appl_data,crypto,[crypto_sup,crypto_server],undefined,{crypto_ap">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac394862f60 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.7.0>,<0.142.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,23}, {trap_exit,true}]}, {<0.142.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab78e088 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) crypto_app">>, <<"y(2) <0.143.0>">>,<<"y(3) <0.141.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.141.0>,<0.143.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,49}, {trap_exit,true}]}, {<0.143.0>, [{registered_name,crypto_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39489ba68 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,crypto_sup},one_for_all,[{child,<0.144.0>,crypto_server,{crypto_serv">>, <<"y(4) crypto_sup">>,<<"y(5) <0.142.0>">>, <<>>, <<"0x00002ac39489baa0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.142.0>,<0.144.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,102}, {trap_exit,true}]}, {<0.144.0>, [{registered_name,crypto_server}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 
+ 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394888d80 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) crypto_server">>,<<"y(3) []">>, <<"y(4) crypto_server">>, <<"y(5) <0.143.0>">>,<<>>, <<"0x00002ac394888db8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.143.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,26}, {trap_exit,false}]}, {<0.147.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39488fd70 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.148.0>,{appl_data,inets,[inets_sup,httpc_manager],undefined,{inets_app,[">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac39488fd90 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.7.0>,<0.148.0>]}, {memory,3992}, {message_queue_len,0}, {reductions,42}, {trap_exit,true}]}, {<0.148.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948881e8 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) inets_app">>, <<"y(2) <0.149.0>">>,<<"y(3) <0.147.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.147.0>,<0.149.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,42}, {trap_exit,true}]}, {<0.149.0>, [{registered_name,inets_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39488f180 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,inets_sup},one_for_one,[{child,<0.156.0>,tftp_sup,{tftp_sup,start_li">>, <<"y(4) inets_sup">>,<<"y(5) <0.148.0>">>, <<>>, <<"0x00002ac39488f1b8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links, [<0.150.0>,<0.155.0>,<0.156.0>,<0.151.0>,<0.148.0>]}, {memory,4112}, {message_queue_len,0}, {reductions,371}, {trap_exit,true}]}, {<0.150.0>, [{registered_name,ftp_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394887a70 Return addr 
0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ftp_sup},simple_one_for_one,[{child,undefined,undefined,{ftp,start_l">>, <<"y(4) ftp_sup">>,<<"y(5) <0.149.0>">>,<<>>, <<"0x00002ac394887aa8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.149.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,52}, {trap_exit,true}]}, {<0.151.0>, [{registered_name,httpc_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948a5328 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,httpc_sup},one_for_one,[{child,<0.154.0>,httpc_handler_sup,{httpc_ha">>, <<"y(4) httpc_sup">>,<<"y(5) <0.149.0>">>, <<>>, <<"0x00002ac3948a5360 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.152.0>,<0.154.0>,<0.149.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,177}, {trap_exit,true}]}, {<0.152.0>, [{registered_name,httpc_profile_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948a5a80 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,httpc_profile_sup},one_for_one,[{child,<0.153.0>,httpc_manager,{http">>, <<"y(4) httpc_profile_sup">>, <<"y(5) <0.151.0>">>,<<>>, <<"0x00002ac3948a5ab8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.151.0>,<0.153.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,122}, {trap_exit,true}]}, {<0.153.0>, [{registered_name,httpc_manager}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb4de80 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) httpc_manager">>, <<"(3) {state,[],httpc_manager__handler_db,{cookie_db,undefined,8213},httpc_manager__sess">>, <<"y(4) httpc_manager">>, <<"y(5) <0.152.0>">>,<<>>, <<"0x00002aaaacb4deb8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,10946}, {total_heap_size,10946}, {links,[<0.152.0>]}, {memory,88504}, {message_queue_len,0}, 
{reductions,1927}, {trap_exit,true}]}, {<0.154.0>, [{registered_name,httpc_handler_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab05a8b58 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,httpc_handler_sup},simple_one_for_one,[{child,undefined,undefined,{h">>, <<"y(4) httpc_handler_sup">>, <<"y(5) <0.151.0>">>,<<>>, <<"0x00002aaab05a8b90 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,10946}, {total_heap_size,10946}, {links,[<0.151.0>]}, {memory,88504}, {message_queue_len,0}, {reductions,1976}, {trap_exit,true}]}, {<0.155.0>, [{registered_name,httpd_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948894d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"y(3) {state,{local,httpd_sup},one_for_one,[],undefined,10,3600,[],httpd_sup,[[]]}">>, <<"y(4) httpd_sup">>,<<"y(5) <0.149.0>">>, <<>>, <<"0x00002ac394889510 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.149.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,43}, {trap_exit,true}]}, {<0.156.0>, [{registered_name,tftp_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948a4780 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"y(3) {state,{local,tftp_sup},one_for_one,[],undefined,10,3600,[],tftp_sup,[[]]}">>, <<"y(4) tftp_sup">>,<<"y(5) <0.149.0>">>, <<>>, <<"0x00002ac3948a47b8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.149.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,44}, {trap_exit,true}]}, {<0.159.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394887330 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.160.0>,{appl_data,ssl,[ssl_sup,ssl_manager],undefined,{ssl_app,[]},[ssl,">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac394887350 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, 
[{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.7.0>,<0.160.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,23}, {trap_exit,true}]}, {<0.160.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948a12b0 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) ssl_app">>, <<"y(2) <0.161.0>">>,<<"y(3) <0.159.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.159.0>,<0.161.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,49}, {trap_exit,true}]}, {<0.161.0>, [{registered_name,ssl_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394898ec0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ssl_sup},one_for_all,[{child,<0.163.0>,ssl_connection,{ssl_connectio">>, <<"y(4) ssl_sup">>,<<"y(5) <0.160.0>">>,<<>>, <<"0x00002ac394898ef8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.162.0>,<0.163.0>,<0.160.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,182}, {trap_exit,true}]}, {<0.162.0>, [{registered_name,ssl_manager}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394898768 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ssl_manager">>, <<"(3) {state,24601,ssl_session_cache,8640,[12310,16407,20504],#Ref<0.0.0.324>,{undefined">>, <<"y(4) ssl_manager">>,<<"y(5) <0.161.0>">>, <<>>, <<"0x00002ac3948987a0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.161.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,56}, {trap_exit,true}]}, {<0.163.0>, [{registered_name,ssl_connection_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948a2898 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ssl_connection_sup},simple_one_for_one,[{child,undefined,undefined,{">>, <<"y(4) ssl_connection_sup">>, <<"y(5) <0.161.0>">>,<<>>, <<"0x00002ac3948a28d0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, 
{garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.161.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,52}, {trap_exit,true}]}, {<0.165.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394899630 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.166.0>,{appl_data,lhttpc,[lhttpc_manager],undefined,{lhttpc,nil},[],[],i">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac394899650 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.7.0>,<0.166.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,23}, {trap_exit,true}]}, {<0.166.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394899d90 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) lhttpc">>, <<"y(2) <0.167.0>">>,<<"y(3) <0.165.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.165.0>,<0.167.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,49}, {trap_exit,true}]}, {<0.167.0>, [{registered_name,lhttpc_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948936a8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,lhttpc_sup},one_for_one,[{child,<0.168.0>,lhttpc_manager,{lhttpc_man">>, <<"y(4) lhttpc_sup">>,<<"y(5) <0.166.0>">>, <<>>, <<"0x00002ac3948936e0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.166.0>,<0.168.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,121}, {trap_exit,true}]}, {<0.168.0>, [{registered_name,lhttpc_manager}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39489b0a0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) lhttpc_manager">>, <<"(3) {httpc_man,{dict,0,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]}">>, <<"y(4) lhttpc_manager">>, <<"y(5) <0.167.0>">>,<<>>, <<"0x00002ac39489b0d8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, 
{min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.167.0>]}, {memory,3952}, {message_queue_len,0}, {reductions,74}, {trap_exit,false}]}, {<0.170.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39489a4e0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.171.0>,{appl_data,mochiweb,[],undefined,{mochiweb_app,[]},[mochihex,moch">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002ac39489a500 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.7.0>,<0.171.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,23}, {trap_exit,true}]}, {<0.171.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948a3010 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) mochiweb_app">>, <<"y(2) <0.172.0>">>,<<"y(3) <0.170.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.170.0>,<0.172.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,49}, {trap_exit,true}]}, {<0.172.0>, [{registered_name,mochiweb_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948a06d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"y(3) {state,{local,mochiweb_sup},one_for_one,[],undefined,10,10,[],mochiweb_sup,[]}">>, <<"y(4) mochiweb_sup">>,<<"y(5) <0.171.0>">>, <<>>, <<"0x00002ac3948a0710 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.171.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,41}, {trap_exit,true}]}, {<0.175.0>, [{registered_name,couch_config}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1b61ce8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_config">>, <<"(3) {config,[{<0.429.0>,#Fun},{<0.400.0>,#Fun>, <<"y(4) couch_config">>,<<"y(5) <0.139.0>">>, <<>>, <<"0x00002aaab1b61d20 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, 
{links,[<0.176.0>]}, {memory,23120}, {message_queue_len,0}, {reductions,257340}, {trap_exit,false}]}, {<0.176.0>, [{registered_name,couch_server_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394883078 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,couch_server_sup},one_for_all,[{child,<0.195.0>,couch_secondary_serv">>, <<"y(4) couch_server_sup">>, <<"y(5) <0.139.0>">>,<<>>, <<"0x00002ac3948830b0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.175.0>,<0.177.0>,<0.195.0>,<0.139.0>]}, {memory,9024}, {message_queue_len,0}, {reductions,295}, {trap_exit,true}]}, {<0.177.0>, [{registered_name,couch_primary_services}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab791e38 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,couch_primary_services},one_for_one,[{child,<0.192.0>,couch_access_l">>, <<"y(4) couch_primary_services">>, <<"y(5) <0.176.0>">>,<<>>, <<"0x00002aaaab791e70 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links, [<0.184.0>,<0.190.0>,<0.192.0>,<0.245.0>,<0.191.0>, <0.186.0>,<0.185.0>,<0.180.0>,<0.182.0>,<0.183.0>, <0.181.0>,<0.178.0>,<0.179.0>,<0.176.0>]}, {memory,6336}, {message_queue_len,0}, {reductions,958}, {trap_exit,true}]}, {<0.178.0>, [{registered_name,couch_drv}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394884f70 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_drv">>,<<"y(3) nil">>, <<"y(4) couch_drv">>,<<"y(5) <0.177.0>">>, <<>>, <<"0x00002ac394884fa8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.177.0>]}, {memory,5816}, {message_queue_len,0}, {reductions,61}, {trap_exit,false}]}, {<0.179.0>, [{registered_name,couch_task_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabca5a70 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) couch_task_events">>, <<"y(4) <0.177.0>">>,<<>>, <<"0x00002aaaabca5aa0 Return addr 
0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,78}, {trap_exit,true}]}, {<0.180.0>, [{registered_name,couch_task_status}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafd16f68 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_task_status">>,<<"y(3) nil">>, <<"y(4) couch_task_status">>, <<"y(5) <0.177.0>">>,<<>>, <<"0x00002aaaafd16fa0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,52589}, {trap_exit,false}]}, {<0.181.0>, [{registered_name,couch_file_write_guard}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab182eda0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_file_write_guard">>, <<"y(3) true">>, <<"y(4) couch_file_write_guard">>, <<"y(5) <0.177.0>">>,<<>>, <<"0x00002aaab182edd8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.177.0>]}, {memory,4168}, {message_queue_len,0}, {reductions,33444}, {trap_exit,false}]}, {<0.182.0>, [{registered_name,couch_server}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2845a98 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_server">>, <<"(3) {server,\"/opt/couchbase/var/lib/couchdb\",{re_pattern,0,0,<<124 bytes>>},10000,1,\"W">>, <<"y(4) couch_server">>,<<"y(5) <0.177.0>">>, <<>>, <<"0x00002aaab2845ad0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.204.0>,<0.23233.0>,<0.408.0>,<0.177.0>]}, {memory,9096}, {message_queue_len,0}, {reductions,3783229}, {trap_exit,true}]}, {<0.183.0>, [{registered_name,couch_compress_types}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab7970d0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) 
infinity">>, <<"y(2) couch_compress_types">>, <<"y(3) nil">>, <<"y(4) couch_compress_types">>, <<"y(5) <0.177.0>">>,<<>>, <<"0x00002aaaab797108 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.177.0>]}, {memory,5888}, {message_queue_len,0}, {reductions,137}, {trap_exit,false}]}, {<0.184.0>, [{registered_name,couch_db_update}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab719010 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,couch_db_update_notifier,#Ref<0.0.0.2019>,#Fun>, <<"y(3) couch_db_update">>, <<"y(4) <0.177.0>">>,<<>>, <<"0x00002aaaab719040 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links, [<0.224.0>,<0.413.0>,<0.412.0>,<0.210.0>,<0.218.0>, <0.177.0>]}, {memory,13912}, {message_queue_len,0}, {reductions,458277}, {trap_exit,true}]}, {<0.185.0>, [{registered_name,couch_replication}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab797830 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) couch_replication">>, <<"y(4) <0.177.0>">>,<<>>, <<"0x00002aaaab797860 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,20}, {trap_exit,true}]}, {<0.186.0>, [{registered_name,couch_rep_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab7925a0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"y(3) {state,{local,couch_rep_sup},one_for_one,[],undefined,3,10,[],couch_rep_sup,[]}">>, <<"y(4) couch_rep_sup">>, <<"y(5) <0.177.0>">>,<<>>, <<"0x00002aaaab7925d8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,41}, {trap_exit,true}]}, {<0.190.0>, [{registered_name,couch_main_index_barrier}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, 
<<"arity = 0">>,<<>>, <<"0x00002aaab1b664c8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_index_barrier">>, <<"(3) {state,[],4,{[],[]},{dict,0,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],">>, <<"y(4) couch_main_index_barrier">>, <<"y(5) <0.177.0>">>,<<>>, <<"0x00002aaab1b66500 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>]}, {memory,2872}, {message_queue_len,0}, {reductions,2524}, {trap_exit,false}]}, {<0.191.0>, [{registered_name,couch_replica_index_barrier}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab71d808 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_index_barrier">>, <<"(3) {state,[],2,{[],[]},{dict,0,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],">>, <<"y(4) couch_replica_index_barrier">>, <<"y(5) <0.177.0>">>,<<>>, <<"0x00002aaaab71d840 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>]}, {memory,2872}, {message_queue_len,0}, {reductions,42}, {trap_exit,false}]}, {<0.192.0>, [{registered_name,couch_access_log}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab71d0b0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_event_sup">>, <<"y(3) {error_logger,couch_access_log}">>, <<"y(4) couch_access_log">>, <<"y(5) <0.177.0>">>,<<>>, <<"0x00002aaaab71d0e8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>,<0.6.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,34}, {trap_exit,false}]}, {<0.193.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaac15d200 (disk_log:loop/1 + 168)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab6f21b0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"(0) {state,[],[],<0.62.0>,<0.63.0>,0,{arg,couch_disk_access_logger,undefined,\"/opt/cou">>, <<>>, <<"0x00002aaaab6f21c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.6.0>,<0.62.0>,<0.63.0>,#Port<0.4745>]}, {memory,5936}, {message_queue_len,0}, {reductions,456}, {trap_exit,true}]}, {<0.195.0>, 
[{registered_name,couch_secondary_services}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcec1d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,couch_secondary_services},one_for_one,[{child,<0.243.0>,uuids,{couch">>, <<"y(4) couch_secondary_services">>, <<"y(5) <0.176.0>">>,<<>>, <<"0x00002aaaafcec210 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,4181}, {total_heap_size,4181}, {links, [<0.219.0>,<0.225.0>,<0.242.0>,<0.243.0>,<0.221.0>, <0.223.0>,<0.220.0>,<0.211.0>,<0.216.0>,<0.217.0>, <0.212.0>,<0.196.0>,<0.197.0>,<0.176.0>]}, {memory,34904}, {message_queue_len,0}, {reductions,29522}, {trap_exit,true}]}, {<0.196.0>, [{registered_name,couch_db_update_notifier_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab71eb38 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,couch_db_update_notifier_sup},one_for_one,[],undefined,10,3600,[],co">>, <<"y(4) couch_db_update_notifier_sup">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002aaaab71eb70 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.195.0>]}, {memory,2872}, {message_queue_len,0}, {reductions,312}, {trap_exit,true}]}, {<0.197.0>, [{registered_name,couch_auth_cache}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab799710 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_auth_cache">>, <<"y(3) {state,50,0,<0.210.0>}">>, <<"y(4) couch_auth_cache">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002aaaab799748 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.195.0>,<0.210.0>]}, {memory,9016}, {message_queue_len,0}, {reductions,641}, {trap_exit,true}]}, {<0.200.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394840b48 Return addr 0x00002aaaacd3c328 (couch_file:init/1 + 1032)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_file">>, <<"y(3) {file,<0.202.0>,<0.203.0>,4175}">>, <<"y(4) <0.200.0>">>,<<"y(5) <0.199.0>">>, <<>>, <<"0x00002ac394840b80 Return addr 0x00002ac394788050 
(proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"y(4) \"/opt/couchbase/var/lib/couchdb/_users.couch.1\"">>, <<"y(5) Catch 0x00002aaaacd3c348 (couch_file:init/1 + 1064)">>, <<>>, <<"0x00002ac394840bb8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.203.0>,<0.207.0>,<0.202.0>]}, {memory,4104}, {message_queue_len,0}, {reductions,414}, {trap_exit,true}]}, {<0.201.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab9ae838 (file_io_server:server_loop/1 + 152)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948426c8 Return addr 0x0000000000875c98 ()">>, <<"(0) {state,{file_descriptor,prim_file,{#Port<0.4818>,20}},<0.200.0>,#Ref<0.0.0.511>,<<">>, <<"y(1) #Ref<0.0.0.511>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.18.0>,#Port<0.4818>]}, {memory,2808}, {message_queue_len,0}, {reductions,267}, {trap_exit,false}]}, {<0.202.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaacd42350 (couch_file:reader_loop/3 + 216)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483ffc0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) 10">>, <<"y(1) \"/opt/couchbase/var/lib/couchdb/_users.couch.1\"">>, <<"y(2) []">>,<<>>, <<"0x00002ac39483ffe0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.200.0>]}, {memory,3952}, {message_queue_len,0}, {reductions,352}, {trap_exit,true}]}, {<0.203.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaacd40eb8 (couch_file:writer_loop/4 + 232)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd1d090 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) 10">>,<<"y(1) 4175">>, <<"y(2) \"/opt/couchbase/var/lib/couchdb/_users.couch.1\"">>, <<"y(3) []">>,<<>>, <<"0x00002aaaabd1d0b8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.200.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,2393}, {trap_exit,true}]}, {<0.204.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394836780 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_db">>, <<"(3) {db,<0.204.0>,<0.205.0>,nil,<<16 
bytes>>,<0.200.0>,<0.207.0>,{db_header,10,1,<<28 ">>, <<"y(4) <0.204.0>">>,<<"y(5) <0.199.0>">>, <<>>, <<"0x00002ac3948367b8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.182.0>,<0.205.0>]}, {memory,8944}, {message_queue_len,0}, {reductions,179}, {trap_exit,true}]}, {<0.205.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394837aa0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_db_updater">>, <<"(3) {db,<0.204.0>,<0.205.0>,nil,<<16 bytes>>,<0.200.0>,<0.207.0>,{db_header,10,1,<<28 ">>, <<"y(4) <0.205.0>">>,<<"y(5) <0.204.0>">>, <<>>, <<"0x00002ac394837ad8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.204.0>]}, {memory,5888}, {message_queue_len,0}, {reductions,1345}, {trap_exit,true}]}, {<0.207.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394892518 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_ref_counter">>, <<"(3) {srv,{dict,3,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},{{[],">>, <<"y(4) <0.207.0>">>,<<"y(5) <0.207.0>">>, <<>>, <<"0x00002ac394892550 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.200.0>]}, {memory,4168}, {message_queue_len,0}, {reductions,109}, {trap_exit,false}]}, {<0.210.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483f3d0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_event_sup">>, <<"y(3) {couch_db_update,{couch_db_update_notifier,#Ref<0.0.0.539>}}">>, <<"y(4) <0.210.0>">>,<<"y(5) <0.197.0>">>, <<>>, <<"0x00002ac39483f408 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.197.0>,<0.184.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,32}, {trap_exit,false}]}, {<0.211.0>, [{registered_name,couch_os_daemons}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 
0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394886740 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_os_daemons">>,<<"y(3) 28711">>, <<"y(4) couch_os_daemons">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002ac394886778 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.195.0>]}, {memory,2872}, {message_queue_len,0}, {reductions,3718}, {trap_exit,true}]}, {<0.212.0>, [{registered_name,mc_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483a560 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mc_sup},one_for_one,[{child,<0.215.0>,mc_conn_sup,{mc_conn_sup,start">>, <<"y(4) mc_sup">>,<<"y(5) <0.195.0>">>,<<>>, <<"0x00002ac39483a598 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.213.0>,<0.214.0>,<0.215.0>,<0.195.0>]}, {memory,4072}, {message_queue_len,0}, {reductions,198}, {trap_exit,true}]}, {<0.213.0>, [{registered_name,mc_couch_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbfc090 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) mc_couch_events">>, <<"y(4) <0.212.0>">>,<<>>, <<"0x00002aaaabbfc0c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.212.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,115778}, {trap_exit,true}]}, {<0.214.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_tcp_listener,init,1}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab7a15a8 Return addr 0x00002aaaac9f3458 (inet_tcp:accept/1 + 40)">>, <<"y(0) 64903">>,<<"y(1) #Port<0.5134>">>, <<>>, <<"0x00002aaaab7a15c0 Return addr 0x00002aaaacd8f6d0 (mc_tcp_listener:accept_loop/1 + 56)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaab7a15d0 Return addr 0x0000000000875c98 ()">>, <<"y(0) #Port<0.5134>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links, [#Port<0.13906>,#Port<0.24063>,<0.212.0>, #Port<0.24065>,#Port<0.13907>,#Port<0.9399>, #Port<0.11302>,#Port<0.11303>,#Port<0.9400>, #Port<0.7202>,#Port<0.7224>,#Port<0.5134>]}, {memory,3208}, {message_queue_len,0}, {reductions,2876}, {trap_exit,false}]}, 
{<0.215.0>, [{registered_name,mc_conn_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab7957c8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mc_conn_sup},simple_one_for_one,[{child,undefined,mc_connection,{mc_">>, <<"y(4) mc_conn_sup">>,<<"y(5) <0.212.0>">>, <<>>, <<"0x00002aaaab795800 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links, [<0.3770.0>,<0.10174.0>,<0.23206.0>,<0.23209.0>, <0.10177.0>,<0.6952.0>,<0.6955.0>,<0.3773.0>, <0.563.0>,<0.564.0>,<0.212.0>]}, {memory,4352}, {message_queue_len,0}, {reductions,355}, {trap_exit,true}]}, {<0.216.0>, [{registered_name,couch_httpd_vhost}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc581b8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_httpd_vhost">>, <<"(3) {vhosts_state,[],[\"_utils\",\"_uuids\",\"_session\",\"_oauth\",\"_users\"],#Fun>, <<"y(4) couch_httpd_vhost">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002aaaabc581f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.195.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,839}, {trap_exit,false}]}, {<0.217.0>, [{registered_name,couch_set_view}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39485f160 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_set_view">>, <<"y(3) {server,\"/opt/couchbase/var/lib/couchdb\",<0.218.0>}">>, <<"y(4) couch_set_view">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002ac39485f198 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.218.0>,<0.195.0>]}, {memory,4064}, {message_queue_len,0}, {reductions,369210}, {trap_exit,true}]}, {<0.218.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcd9618 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_event_sup">>, <<"y(3) {couch_db_update,{couch_db_update_notifier,#Ref<0.0.0.590>}}">>, <<"y(4) <0.218.0>">>,<<"y(5) <0.217.0>">>, <<>>, <<"0x00002aaaafcd9650 Return addr 0x0000000000875c98 ()">>, 
<<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.217.0>,<0.184.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,32}, {trap_exit,false}]}, {<0.219.0>, [{registered_name,couch_spatial}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab13c0ae8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_spatial">>, <<"y(3) {spatial,\"/opt/couchbase/var/lib/couchdb\",0,nil,0,nil,[],0,0,0,nil}">>, <<"y(4) couch_spatial">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002aaab13c0b20 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.195.0>]}, {memory,3952}, {message_queue_len,0}, {reductions,223}, {trap_exit,true}]}, {<0.220.0>, [{registered_name,couch_index_merger_connection_pool}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394839518 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) lhttpc_manager">>, <<"(3) {httpc_man,{dict,0,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]}">>, <<"y(4) couch_index_merger_connection_pool">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002ac394839550 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.195.0>]}, {memory,5816}, {message_queue_len,0}, {reductions,6129}, {trap_exit,false}]}, {<0.221.0>, [{registered_name,couch_query_servers}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39482c600 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_query_servers">>, <<"y(3) {qserver,32815,41009,45106,36912,[],{[{<<12 bytes>>,true},{<<7 bytes>>,30000}]}}">>, <<"y(4) couch_query_servers">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002ac39482c638 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.195.0>]}, {memory,5888}, {message_queue_len,0}, {reductions,2399}, {trap_exit,true}]}, {<0.223.0>, [{registered_name,couch_view}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, 
<<"0x00002ac394850388 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_view">>, <<"y(3) {server,\"/opt/couchbase/var/lib/couchdb\"}">>, <<"y(4) couch_view">>,<<"y(5) <0.195.0>">>, <<>>, <<"0x00002ac3948503c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.195.0>,<0.224.0>]}, {memory,8944}, {message_queue_len,0}, {reductions,664240}, {trap_exit,true}]}, {<0.224.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcd8a50 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_event_sup">>, <<"y(3) {couch_db_update,{couch_db_update_notifier,#Ref<0.0.0.618>}}">>, <<"y(4) <0.224.0>">>,<<"y(5) <0.223.0>">>, <<>>, <<"0x00002aaaafcd8a88 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.223.0>,<0.184.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,32}, {trap_exit,false}]}, {<0.225.0>, [{registered_name,couch_httpd}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948316d0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mochiweb_socket_server">>, <<"(3) {mochiweb_socket_server,8092,#Fun,{local,couch_httpd},20">>, <<"y(4) couch_httpd">>,<<"y(5) <0.195.0>">>, <<>>, <<"0x00002ac394831708 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links, [<0.231.0>,<0.235.0>,<0.239.0>,<0.240.0>,<0.241.0>, <0.237.0>,<0.238.0>,<0.236.0>,<0.233.0>,<0.234.0>, <0.232.0>,<0.227.0>,<0.229.0>,<0.230.0>,<0.228.0>, <0.195.0>,<0.226.0>,#Port<0.5275>]}, {memory,22360}, {message_queue_len,0}, {reductions,951}, {trap_exit,true}]}, {<0.226.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb2cf00 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4036">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacb2cf18 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacb2cf28 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653173}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacb2cf58 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 
0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.227.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacac8088 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4034">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacac80a0 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacac80b0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653165}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacac80e0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.228.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb27e30 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4037">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacb27e48 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacb27e58 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653177}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacb27e88 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.229.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0669f78 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4023">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaab0669f90 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaab0669fa0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653118}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaab0669fd0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, 
{error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.230.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab29538e8 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4024">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaab2953900 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaab2953910 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653126}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaab2953940 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.231.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacad2228 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4032">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacad2240 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacad2250 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653157}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacad2280 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.232.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb22d60 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4030">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacb22d78 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacb22d88 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653148}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacb22db8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, 
[{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.233.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc20d30 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4026">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaabc20d48 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabc20d58 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653134}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaabc20d88 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.234.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc25e00 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4022">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaabc25e18 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabc25e28 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653082}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaabc25e58 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.235.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae1498 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4027">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacae14b0 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacae14c0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653138}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacae14f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, 
{minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.236.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab294e818 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4025">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaab294e830 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaab294e840 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653131}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaab294e870 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.237.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacadc3c8 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4028">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacadc3e0 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacadc3f0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653141}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacadc420 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.238.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb1dc90 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4029">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacb1dca8 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacb1dcb8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653145}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacb1dce8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, 
{links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.239.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacad72f8 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4031">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacad7310 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacad7320 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653153}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacad7350 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.240.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacacd158 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4033">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacacd170 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacacd180 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653161}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacacd1b0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, {reductions,5098}, {trap_exit,false}]}, {<0.241.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb31fd0 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4035">>,<<"y(1) #Port<0.5275>">>, <<>>, <<"0x00002aaaacb31fe8 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacb31ff8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107824,653169}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.5275>">>, <<"y(4) <0.225.0>">>,<<>>, <<"0x00002aaaacb32028 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.225.0>]}, {memory,21680}, {message_queue_len,0}, 
{reductions,5098}, {trap_exit,false}]}, {<0.242.0>, [{registered_name,couch_external_manager}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394831e28 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_external_manager">>, <<"y(3) 49206">>, <<"y(4) couch_external_manager">>, <<"y(5) <0.195.0>">>,<<>>, <<"0x00002ac394831e60 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.195.0>]}, {memory,2872}, {message_queue_len,0}, {reductions,39}, {trap_exit,true}]}, {<0.243.0>, [{registered_name,couch_uuids}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcd7730 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_uuids">>, <<"y(3) {sequential,\"411dd72cee27aadd76dc34f8cd\",1511}">>, <<"y(4) couch_uuids">>,<<"y(5) <0.195.0>">>, <<>>, <<"0x00002aaaafcd7768 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.195.0>]}, {memory,2872}, {message_queue_len,0}, {reductions,105}, {trap_exit,false}]}, {<0.245.0>, [{registered_name,couch_log}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab7a0e40 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_event_sup">>, <<"y(3) {error_logger,couch_log}">>, <<"y(4) couch_log">>,<<"y(5) <0.177.0>">>, <<>>, <<"0x00002aaaab7a0e78 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.177.0>,<0.6.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,34}, {trap_exit,false}]}, {<0.246.0>, [{registered_name,timeout_diag_logger}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394892c70 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) timeout_diag_logger">>, <<"y(3) {state,1334107147179}">>, <<"y(4) timeout_diag_logger">>, <<"y(5) <0.138.0>">>,<<>>, <<"0x00002ac394892ca8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, 
{heap_size,233}, {total_heap_size,233}, {links,[<0.138.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,28}, {trap_exit,false}]}, {<0.247.0>, [{registered_name,dist_manager}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac574038 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) dist_manager">>, <<"y(3) {state,true,\"10.1.2.30\"}">>, <<"y(4) dist_manager">>,<<"y(5) <0.138.0>">>, <<>>, <<"0x00002aaaac574070 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.138.0>]}, {memory,8832}, {message_queue_len,0}, {reductions,11963}, {trap_exit,false}]}, {<0.254.0>, [{registered_name,ns_cookie_manager}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab22d28e8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_cookie_manager">>, <<"y(3) {state}">>, <<"y(4) ns_cookie_manager">>, <<"y(5) <0.138.0>">>,<<>>, <<"0x00002aaab22d2920 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,46368}, {total_heap_size,46368}, {links,[<0.138.0>]}, {memory,371880}, {message_queue_len,0}, {reductions,1567532}, {trap_exit,false}]}, {<0.255.0>, [{registered_name,ns_cluster}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac525440 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_cluster">>,<<"y(3) {state}">>, <<"y(4) ns_cluster">>,<<"y(5) <0.138.0>">>, <<>>, <<"0x00002aaaac525478 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.138.0>]}, {memory,8832}, {message_queue_len,0}, {reductions,221443}, {trap_exit,false}]}, {<0.256.0>, [{registered_name,mb_mnesia_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394890c08 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mb_mnesia_sup},one_for_one,[{child,<0.258.0>,mb_mnesia,{mb_mnesia,st">>, <<"y(4) mb_mnesia_sup">>, <<"y(5) <0.138.0>">>,<<>>, <<"0x00002ac394890c40 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, 
{garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.257.0>,<0.258.0>,<0.138.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,162}, {trap_exit,true}]}, {<0.257.0>, [{registered_name,mb_mnesia_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac50aa20 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) mb_mnesia_events">>, <<"y(4) <0.256.0>">>,<<>>, <<"0x00002aaaac50aa50 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.256.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,36}, {trap_exit,true}]}, {<0.258.0>, [{registered_name,mb_mnesia}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0d286d0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mb_mnesia">>, <<"y(3) {state,['ns_1@10.1.2.30']}">>, <<"y(4) mb_mnesia">>,<<"y(5) <0.256.0>">>, <<>>, <<"0x00002aaab0d28708 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.256.0>,<0.12922.0>]}, {memory,8872}, {message_queue_len,0}, {reductions,118486}, {trap_exit,true}]}, {<0.280.0>, [{registered_name,dets_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc2a7c8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,dets_sup},simple_one_for_one,[{child,undefined,dets,{dets,istart_lin">>, <<"y(4) dets_sup">>,<<"y(5) <0.25.0>">>,<<>>, <<"0x00002aaaabc2a800 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.25.0>]}, {memory,3952}, {message_queue_len,0}, {reductions,1290}, {trap_exit,true}]}, {<0.281.0>, [{registered_name,dets}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483cba0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) dets_server">>, <<"y(3) {state,77894,[<0.25.0>],[]}">>, <<"y(4) dets">>,<<"y(5) <0.25.0>">>,<<>>, <<"0x00002ac39483cbd8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 
(proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.25.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,2317}, {trap_exit,true}]}, {<0.306.0>, [{registered_name,ns_config_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc09818 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ns_config_sup},rest_for_one,[{child,<0.316.0>,cb_config_couch_sync,{">>, <<"y(4) ns_config_sup">>, <<"y(5) <0.138.0>">>,<<>>, <<"0x00002aaaabc09850 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,6765}, {total_heap_size,6765}, {links, [<0.311.0>,<0.315.0>,<0.316.0>,<0.312.0>,<0.307.0>, <0.308.0>,<0.138.0>]}, {memory,55296}, {message_queue_len,0}, {reductions,2065}, {trap_exit,true}]}, {<0.307.0>, [{registered_name,ns_config_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacaea608 Return addr 0x00002ac394788130 (proc_lib:wake_up/3 + 120)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,ns_pubsub,#Ref<0.0.1.10057>,{state,#Fun,not_runnin">>, <<"y(3) ns_config_events">>, <<"y(4) <0.306.0>">>,<<>>, <<"0x00002aaaacaea638 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788150 (proc_lib:wake_up/3 + 152)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links, [<0.347.0>,<0.401.0>,<0.423.0>,<0.13097.0>,<0.415.0>, <0.391.0>,<0.399.0>,<0.387.0>,<0.318.0>,<0.343.0>, <0.332.0>,<0.313.0>,<0.315.0>,<0.306.0>]}, {memory,9568}, {message_queue_len,0}, {reductions,40252997}, {trap_exit,true}]}, {<0.308.0>, [{registered_name,ns_config}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab25f2870 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_config">>, <<"(3) {config,{full,\"/opt/couchbase/etc/couchbase/config\",undefined,ns_config_default},[">>, <<"y(4) ns_config">>,<<"y(5) <0.306.0>">>, <<>>, <<"0x00002aaab25f28a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.306.0>]}, {memory,601136}, {message_queue_len,0}, {reductions,1162136}, {trap_exit,true}]}, {<0.311.0>, [{registered_name,ns_config_remote}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 
0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2eba8e8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_config_replica">>, <<"y(3) {state}">>, <<"y(4) ns_config_remote">>, <<"y(5) <0.306.0>">>,<<>>, <<"0x00002aaab2eba920 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,46368}, {total_heap_size,46368}, {links,[<0.306.0>]}, {memory,371880}, {message_queue_len,0}, {reductions,4301}, {trap_exit,false}]}, {<0.312.0>, [{registered_name,ns_config_isasl_sync}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac51f040 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_config_isasl_sync">>, <<"(3) {state,[{\"default\",[]}],\"/opt/couchbase/var/lib/couchbase/data/isasl.pw\",11,\"_admi">>, <<"y(4) ns_config_isasl_sync">>, <<"y(5) <0.306.0>">>,<<>>, <<"0x00002aaaac51f078 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.306.0>,<0.313.0>]}, {memory,13752}, {message_queue_len,0}, {reductions,58865}, {trap_exit,false}]}, {<0.313.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcd7e98 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1294>}">>, <<"y(2) <0.312.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002aaaafcd7ec0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.312.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.315.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac1b9ef0 (misc:'-start_event_link/1-fun-0-'/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483e858 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.306.0>,<0.307.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,11}, {trap_exit,false}]}, {<0.316.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff904d0 (cb_config_couch_sync:worker_loop/0 + 24)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab8300898 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<>>, 
<<"0x00002aaab83008a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.306.0>,<0.317.0>]}, {memory,601176}, {message_queue_len,0}, {reductions,578520}, {trap_exit,true}]}, {<0.317.0>, [{registered_name,cb_config_couch_sync}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaad069f70 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) cb_config_couch_sync">>, <<"y(3) {state,false,false,<0.316.0>}">>, <<"y(4) cb_config_couch_sync">>, <<"y(5) <0.316.0>">>,<<>>, <<"0x00002aaaad069fa8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.316.0>,<0.318.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,5054}, {trap_exit,false}]}, {<0.318.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcda4d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1319>}">>, <<"y(2) <0.317.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002aaaafcda500 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.317.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.322.0>, [{registered_name,ns_server_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab182a3f8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ns_server_sup},one_for_one,[{child,<0.12869.0>,xdc_rdoc_replication_">>, <<"y(4) ns_server_sup">>, <<"y(5) <0.138.0>">>,<<>>, <<"0x00002aaab182a430 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links, [<0.361.0>,<0.418.0>,<0.426.0>,<0.12869.0>, <0.13096.0>,<0.421.0>,<0.422.0>,<0.420.0>,<0.398.0>, <0.414.0>,<0.416.0>,<0.400.0>,<0.390.0>,<0.397.0>, <0.366.0>,<0.351.0>,<0.356.0>,<0.360.0>,<0.357.0>, <0.353.0>,<0.355.0>,<0.352.0>,<0.325.0>,<0.342.0>, <0.350.0>,<0.339.0>,<0.323.0>,<0.324.0>,<0.138.0>]}, {memory,14832}, {message_queue_len,0}, {reductions,5423}, {trap_exit,true}]}, {<0.323.0>, [{registered_name,ns_log}, {status,waiting}, 
{initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0593508 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_log">>, <<"(3) {state,[],{dict,0,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},">>, <<"y(4) ns_log">>,<<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaab0593540 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.56.0>,<0.322.0>]}, {memory,3992}, {message_queue_len,0}, {reductions,2982}, {trap_exit,true}]}, {<0.324.0>, [{registered_name,ns_log_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x0000000000875c90 (unknown function)">>, <<"CP: 0x0000000000875c98 ()">>, <<"arity = 3">>,<<" proc_lib">>,<<" wake_up">>, <<" [gen_event,wake_hib,[<0.322.0>,ns_log_events,[{handler,ns_mail_log,false,{state},<0.359.">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,34}, {total_heap_size,34}, {links,[<0.322.0>,<0.359.0>]}, {memory,1248}, {message_queue_len,0}, {reductions,644}, {trap_exit,true}]}, {<0.325.0>, [{registered_name,ns_node_disco_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcdac20 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ns_node_disco_sup},rest_for_one,[{child,<0.333.0>,ns_config_rep,{ns_">>, <<"y(4) ns_node_disco_sup">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaaafcdac58 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links, [<0.331.0>,<0.332.0>,<0.333.0>,<0.326.0>,<0.327.0>, <0.322.0>]}, {memory,3000}, {message_queue_len,0}, {reductions,842}, {trap_exit,true}]}, {<0.326.0>, [{registered_name,ns_node_disco_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab05477f0 Return addr 0x00002ac394788130 (proc_lib:wake_up/3 + 120)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,ns_pubsub,#Ref<0.0.1.5778>,{state,#Fun>, <<"y(3) ns_node_disco_events">>, <<"y(4) <0.325.0>">>,<<>>, <<"0x00002aaab0547820 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788150 (proc_lib:wake_up/3 + 152)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links, [<0.387.0>,<0.12885.0>,<0.331.0>,<0.333.0>,<0.325.0>]}, {memory,3176}, {message_queue_len,0}, {reductions,17656}, {trap_exit,true}]}, {<0.327.0>, [{registered_name,ns_node_disco}, 
{status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab3d63870 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_node_disco">>, <<"y(3) {state,['ns_1@10.1.2.30'],false}">>, <<"y(4) ns_node_disco">>, <<"y(5) <0.325.0>">>,<<>>, <<"0x00002aaab3d638a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.325.0>,<0.56.0>]}, {memory,601176}, {message_queue_len,0}, {reductions,7813748}, {trap_exit,false}]}, {<0.331.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaaffa10e8 (ns_node_disco_log:'-start_link/0-fun-0-'/0 + 80)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab79fb50 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.325.0>,<0.326.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,10}, {trap_exit,false}]}, {<0.332.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac1b9ef0 (misc:'-start_event_link/1-fun-0-'/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab71b6a8 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.325.0>,<0.307.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,11}, {trap_exit,false}]}, {<0.333.0>, [{registered_name,ns_config_rep}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394866ed0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_config_rep">>, <<"y(3) {state,<0.338.0>}">>, <<"y(4) ns_config_rep">>, <<"y(5) <0.325.0>">>,<<>>, <<"0x00002ac394866f08 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.326.0>,<0.338.0>,<0.325.0>]}, {memory,8912}, {message_queue_len,0}, {reductions,486883}, {trap_exit,false}]}, {<0.338.0>, [{registered_name,ns_config_rep_merger}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaaffa7e08 (ns_config_rep:merger_loop/0 + 24)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaba0ef8b0 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.333.0>]}, 
{memory,601032}, {message_queue_len,0}, {reductions,2222880}, {trap_exit,false}]}, {<0.339.0>, [{registered_name,ns_tick_event}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394828818 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,ns_pubsub,#Ref<0.0.0.2042>,{state,#Fun,ignored},<">>, <<"y(3) ns_tick_event">>, <<"y(4) <0.322.0>">>,<<>>, <<"0x00002ac394828848 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.417.0>,<0.322.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,10475}, {trap_exit,true}]}, {<0.342.0>, [{registered_name,mb_master}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2843ba8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mb_master">>, <<"y(3) {state,<0.344.0>,'ns_1@10.1.2.30',['ns_1@10.1.2.30'],{1334,107513,960979}}">>, <<"y(4) master">>,<<"y(5) mb_master">>, <<"y(6) <0.322.0>">>,<<>>, <<"0x00002aaab2843be8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.322.0>,<0.343.0>,<0.344.0>,<0.56.0>]}, {memory,4072}, {message_queue_len,0}, {reductions,314716}, {trap_exit,true}]}, {<0.343.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39488da10 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1563>}">>, <<"y(2) <0.342.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002ac39488da38 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.342.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.344.0>, [{registered_name,mb_master_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac527480 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mb_master_sup},one_for_one,[{child,<0.349.0>,auto_failover,{auto_fai">>, <<"y(4) mb_master_sup">>, <<"y(5) <0.342.0>">>,<<>>, <<"0x00002aaaac5274b8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, 
{error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links, [<0.345.0>,<0.348.0>,<0.349.0>,<0.346.0>,<0.342.0>]}, {memory,8992}, {message_queue_len,0}, {reductions,3573}, {trap_exit,true}]}, {<0.345.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2430868 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_orchestrator">>, <<"y(3) {janitor_state,[\"default\"],<0.24894.0>}">>, <<"y(4) janitor_running">>, <<"y(5) ns_orchestrator">>, <<"y(6) <0.344.0>">>,<<>>, <<"0x00002aaab24308a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.344.0>,<0.24894.0>,<0.56.0>]}, {memory,601288}, {message_queue_len,0}, {reductions,176446}, {trap_exit,true}]}, {<0.346.0>, [{registered_name,cb_replication}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab8c8a870 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) cb_replication">>, <<"(3) {state,{dict,1,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},{{[">>, <<"y(4) cb_replication">>, <<"y(5) <0.344.0>">>,<<>>, <<"0x00002aaab8c8a8a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.344.0>,<0.347.0>]}, {memory,601176}, {message_queue_len,0}, {reductions,675009}, {trap_exit,false}]}, {<0.347.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac528f08 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1597>}">>, <<"y(2) <0.346.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002aaaac528f30 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.346.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.348.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc55738 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_tick">>, <<"y(3) {state,1334107825881}">>, 
<<"y(4) ns_tick">>,<<"y(5) <0.344.0>">>,<<>>, <<"0x00002aaaabc55770 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.344.0>,<0.56.0>]}, {memory,2912}, {message_queue_len,0}, {reductions,25417}, {trap_exit,false}]}, {<0.349.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394895590 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) auto_failover">>, <<"y(3) {state,undefined,nil,30,0}">>, <<"y(4) auto_failover">>, <<"y(5) <0.344.0>">>,<<>>, <<"0x00002ac3948955c8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.344.0>]}, {memory,8904}, {message_queue_len,0}, {reductions,1156}, {trap_exit,false}]}, {<0.350.0>, [{registered_name,master_activity_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483d300 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,ns_pubsub,#Ref<0.0.0.1678>,{state,#Fun>, <<"y(3) master_activity_events">>, <<"y(4) <0.322.0>">>,<<>>, <<"0x00002ac39483d330 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.322.0>,<0.354.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,77154}, {trap_exit,true}]}, {<0.351.0>, [{registered_name,master_activity_events_ingress}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae8720 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,ns_pubsub,#Ref<0.0.0.1656>,{state,#Fun>, <<"y(3) master_activity_events_ingress">>, <<"y(4) <0.322.0>">>,<<>>, <<"0x00002aaaacae8750 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.322.0>,<0.352.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,86438}, {trap_exit,true}]}, {<0.352.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394895cf8 Return addr 0x00002ac394788050 
(proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1656>}">>, <<"y(2) <0.322.0>">>, <<"y(3) master_activity_events_ingress">>,<<>>, <<"0x00002ac394895d20 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.322.0>,<0.351.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.353.0>, [{registered_name,master_activity_events_keeper}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaabd82eb80 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) master_activity_events_keeper">>, <<"(3) {state,{[{{1334,107780,995772},create_bucket,\"default\",membase,[{sasl_password,[]}">>, <<"y(4) master_activity_events_keeper">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaabd82ebb8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,317811}, {total_heap_size,317811}, {links,[<0.322.0>,<0.354.0>]}, {memory,2543464}, {message_queue_len,0}, {reductions,99195}, {trap_exit,false}]}, {<0.354.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39489f3d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1678>}">>, <<"y(2) <0.353.0>">>, <<"y(3) master_activity_events">>,<<>>, <<"0x00002ac39489f400 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.353.0>,<0.350.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.355.0>, [{registered_name,master_activity_events_srv}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2949768 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) work_queue">>,<<"y(3) []">>, <<"y(4) master_activity_events_srv">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaab29497a0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,6765}, {total_heap_size,6765}, {links,[<0.322.0>]}, {memory,55056}, {message_queue_len,0}, {reductions,273856}, {trap_exit,false}]}, {<0.356.0>, [{registered_name,buckets_events}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 
0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafce2f30 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,menelaus_event,buckets_events,{state,undefined,[{<0.24585.0>,#Ref<0.0.2.">>, <<"y(3) buckets_events">>, <<"y(4) <0.322.0>">>,<<>>, <<"0x00002aaaafce2f60 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.362.0>,<0.387.0>,<0.322.0>]}, {memory,4248}, {message_queue_len,0}, {reductions,5048}, {trap_exit,true}]}, {<0.357.0>, [{registered_name,ns_mail_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac572150 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ns_mail_sup},one_for_all,[{child,<0.359.0>,ns_mail_log,{ns_mail_log,">>, <<"y(4) ns_mail_sup">>,<<"y(5) <0.322.0>">>, <<>>, <<"0x00002aaaac572188 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.358.0>,<0.359.0>,<0.322.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,673}, {trap_exit,true}]}, {<0.358.0>, [{registered_name,ns_mail}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac5757b8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_mail">>,<<"y(3) empty_state">>, <<"y(4) ns_mail">>,<<"y(5) <0.357.0>">>,<<>>, <<"0x00002aaaac5757f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.357.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,27}, {trap_exit,true}]}, {<0.359.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac1b9ef0 (misc:'-start_event_link/1-fun-0-'/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac571a38 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.357.0>,<0.324.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,11}, {trap_exit,false}]}, {<0.360.0>, [{registered_name,ns_stats_event}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbf7138 Return addr 
0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"(2) [{handler,ns_pubsub,#Ref<0.0.0.2070>,{state,#Fun,ignored},<">>, <<"y(3) ns_stats_event">>, <<"y(4) <0.322.0>">>,<<>>, <<"0x00002aaaabbf7168 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.419.0>,<0.322.0>]}, {memory,3992}, {message_queue_len,0}, {reductions,97567}, {trap_exit,true}]}, {<0.361.0>, [{registered_name,ns_heart}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac39476b390 (gen:do_call/4 + 576)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab20787e8 Return addr 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<"y(0) #Ref<0.0.2.81726>">>, <<"y(1) 'ns_1@10.1.2.30'">>,<<"y(2) []">>, <<"y(3) 5000">>,<<"y(4) connected">>, <<"y(5) '$gen_call'">>, <<"y(6) <0.24861.0>">>,<<>>, <<"0x00002aaab2078828 Return addr 0x00002aaab0068e20 (ns_memcached:connected/3 + 256)">>, <<"y(0) 5000">>,<<"y(1) connected">>, <<"y(2) {'ns_memcached-default','ns_1@10.1.2.30'}">>, <<"y(3) Catch 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<>>, <<"0x00002aaab2078850 Return addr 0x00002ac3947a6ae0 (lists:'-filter/2-lc$^0/1-0-'/2 + 96)">>, <<"y(0) Catch 0x00002aaab0068e40 (ns_memcached:connected/3 + 288)">>, <<"y(1) []">>,<<>>, <<"0x00002aaab2078868 Return addr 0x00002aaaafff6788 (ns_heart:current_status/1 + 1024)">>, <<"y(0) #Fun">>, <<"y(1) []">>,<<"y(2) \"default\"">>,<<>>, <<"0x00002aaab2078888 Return addr 0x00002aaaafff5e88 (ns_heart:handle_info/2 + 992)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) [\"default\"]">>, <<"(3) [{outgoing_replications_safeness_level,[{\"default\",unknown}]},{incoming_replicatio">>, <<"y(4) []">>,<<"y(5) []">>,<<"y(6) 1">>, <<"(7) [{meminfo,<<777 bytes>>},{system_memory_data,[{system_total_memory,4040077312},{fr">>, <<"(8) [{cpu_idle_ms,0},{cpu_local_ms,3990},{cpu_utilization_rate,1.000000e+02},{mem_actu">>, <<>>, <<"0x00002aaab20788d8 Return addr 0x00002ac3947f28c0 (gen_server:handle_msg/5 + 1680)">>, <<"(0) {state,undefined,[{meminfo,<<777 bytes>>},{system_memory_data,[{system_total_memor">>, <<>>, <<"0x00002aaab20788e8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) ns_heart">>, <<"(1) {state,undefined,[{meminfo,<<777 bytes>>},{system_memory_data,[{system_total_memor">>, <<"y(2) ns_heart">>,<<"y(3) <0.322.0>">>, <<"y(4) beat">>, <<"y(5) Catch 0x00002ac3947f28c0 (gen_server:handle_msg/5 + 1680)">>, <<>>, <<"0x00002aaab2078920 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,46368}, {total_heap_size,46368}, {links,[<0.322.0>,<0.362.0>,<0.56.0>]}, {memory,372032}, {message_queue_len,0}, {reductions,1247327}, {trap_exit,true}]}, {<0.362.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab709e98 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1715>}">>, 
<<"y(2) <0.361.0>">>, <<"y(3) buckets_events">>,<<>>, <<"0x00002aaaab709ec0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.361.0>,<0.356.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.366.0>, [{registered_name,menelaus_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab29544e0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,menelaus_sup},one_for_one,[{child,<0.25192.0>,menelaus_web_alerts_sr">>, <<"y(4) menelaus_sup">>,<<"y(5) <0.322.0>">>, <<>>, <<"0x00002aaab2954518 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links, [<0.368.0>,<0.25110.0>,<0.25192.0>,<0.387.0>, <0.322.0>]}, {memory,4112}, {message_queue_len,0}, {reductions,10617}, {trap_exit,true}]}, {<0.368.0>, [{registered_name,menelaus_web}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab066be80 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mochiweb_socket_server">>, <<"(3) {mochiweb_socket_server,8091,#Fun,{local,menelaus_web},2">>, <<"y(4) menelaus_web">>,<<"y(5) <0.366.0>">>, <<>>, <<"0x00002aaab066beb8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links, [<0.24630.0>,<0.25155.0>,<0.25183.0>,<0.25193.0>, <0.25195.0>,<0.25189.0>,<0.25169.0>,<0.25181.0>, <0.25165.0>,<0.25089.0>,<0.25127.0>,<0.25139.0>, <0.25117.0>,<0.25101.0>,<0.25008.0>,<0.25065.0>, <0.25077.0>,<0.24699.0>,<0.22443.0>,<0.24618.0>, <0.24585.0>,<0.366.0>,<0.3688.0>,<0.3685.0>, #Port<0.6828>]}, {memory,9792}, {message_queue_len,0}, {reductions,55029}, {trap_exit,true}]}, {<0.387.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac1b9ef0 (misc:'-start_event_link/1-fun-0-'/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394842e30 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.326.0>,<0.366.0>,<0.356.0>,<0.307.0>]}, {memory,2816}, {message_queue_len,0}, {reductions,27}, {trap_exit,false}]}, {<0.390.0>, [{registered_name,ns_port_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 
(invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac48bd08 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,ns_port_sup},one_for_one,[{child,<0.23025.0>,{moxi,\"/opt/couchbase/b">>, <<"y(4) ns_port_sup">>,<<"y(5) <0.322.0>">>, <<>>, <<"0x00002aaaac48bd40 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,10946}, {total_heap_size,10946}, {links,[<0.391.0>,<0.394.0>,<0.23025.0>,<0.322.0>]}, {memory,88624}, {message_queue_len,0}, {reductions,21410}, {trap_exit,true}]}, {<0.391.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac1b9ef0 (misc:'-start_event_link/1-fun-0-'/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab704dc8 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.390.0>,<0.307.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,11}, {trap_exit,false}]}, {<0.394.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd61730 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor_cushion">>, <<"y(3) {state,memcached,5000,{1334,107153,6649},<0.396.0>}">>, <<"y(4) <0.394.0>">>,<<"y(5) <0.390.0>">>, <<>>, <<"0x00002aaaabd61768 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.390.0>,<0.396.0>]}, {memory,13752}, {message_queue_len,0}, {reductions,925}, {trap_exit,true}]}, {<0.396.0>, [{registered_name,ns_port_memcached}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab3095128 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_port_server">>, <<"(3) {state,#Port<0.6885>,memcached,{[\"warmup completed in 517 usec\",\"metadata loaded i">>, <<"y(4) <0.396.0>">>,<<"y(5) <0.394.0>">>, <<>>, <<"0x00002aaab3095160 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,46368}, {total_heap_size,46368}, {links,[<0.394.0>,#Port<0.6885>]}, {memory,371920}, {message_queue_len,0}, {reductions,183870}, {trap_exit,true}]}, {<0.397.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaac1b9ef0 (misc:'-start_event_link/1-fun-0-'/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, 
<<"0x00002aaaabd7a000 Return addr 0x0000000000875c98 ()">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,6765}, {total_heap_size,6765}, {links,[<0.322.0>,<0.399.0>]}, {memory,54992}, {message_queue_len,0}, {reductions,611}, {trap_exit,false}]}, {<0.398.0>, [{registered_name,ns_bucket_worker}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac39476b390 (gen:do_call/4 + 576)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab79c868 Return addr 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<"y(0) #Ref<0.0.2.75902>">>, <<"y(1) 'ns_1@10.1.2.30'">>,<<"y(2) []">>, <<"y(3) infinity">>, <<"y(4) {terminate_child,{per_bucket_sup,\"default\"}}">>, <<"y(5) '$gen_call'">>,<<"y(6) <0.414.0>">>, <<>>, <<"x00002aaaab79c8a8 Return addr 0x00002aaab00c5be0 (ns_bucket_sup:'-update_childs/1-fun-2-'/">>, <<"y(0) infinity">>, <<"y(1) {terminate_child,{per_bucket_sup,\"default\"}}">>, <<"y(2) ns_bucket_sup">>, <<"y(3) Catch 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<>>, <<"0x00002aaaab79c8d0 Return addr 0x00002ac394795b68 (lists:foreach/2 + 120)">>, <<"y(0) {per_bucket_sup,\"default\"}">>,<<>>, <<"0x00002aaaab79c8e0 Return addr 0x00002aaaaffaea68 (work_queue:handle_cast/2 + 56)">>, <<"y(0) #Fun">>, <<"y(1) []">>,<<>>, <<"0x00002aaaab79c8f8 Return addr 0x00002ac3947f28c0 (gen_server:handle_msg/5 + 1680)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaab79c908 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) work_queue">>,<<"y(1) []">>, <<"y(2) ns_bucket_worker">>, <<"y(3) <0.322.0>">>, <<"y(4) {'$gen_cast',#Fun}">>, <<"y(5) Catch 0x00002ac3947f28c0 (gen_server:handle_msg/5 + 1680)">>, <<>>, <<"0x00002aaaab79c940 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.322.0>]}, {memory,13848}, {message_queue_len,2}, {reductions,20962}, {trap_exit,false}]}, {<0.399.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd8d4e0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1915>}">>, <<"y(2) <0.397.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002aaaabd8d508 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.397.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.400.0>, [{registered_name,xdc_rep_manager}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac4d5aa0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) xdc_rep_manager">>, <<"y(3) {rep_db_state,<0.411.0>,<<11 
bytes>>,<0.412.0>}">>, <<"y(4) xdc_rep_manager">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaaac4d5ad8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,10946}, {total_heap_size,10946}, {links, [<0.322.0>,<0.411.0>,<0.412.0>,<0.401.0>,<0.56.0>]}, {memory,88736}, {message_queue_len,0}, {reductions,39917}, {trap_exit,true}]}, {<0.401.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394834438 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.1935>}">>, <<"y(2) <0.400.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002ac394834460 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.400.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.404.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd8b0f0 Return addr 0x00002aaaacd3c328 (couch_file:init/1 + 1032)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_file">>, <<"y(3) {file,<0.406.0>,<0.407.0>,12367}">>, <<"y(4) <0.404.0>">>,<<"y(5) <0.403.0>">>, <<>>, <<"0x00002aaaabd8b128 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"y(4) \"/opt/couchbase/var/lib/couchdb/_replicator.couch.1\"">>, <<"y(5) Catch 0x00002aaaacd3c348 (couch_file:init/1 + 1064)">>, <<>>, <<"0x00002aaaabd8b160 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.407.0>,<0.410.0>,<0.406.0>]}, {memory,4104}, {message_queue_len,0}, {reductions,715}, {trap_exit,true}]}, {<0.405.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab9ae838 (file_io_server:server_loop/1 + 152)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbf9a58 Return addr 0x0000000000875c98 ()">>, <<"(0) {state,{file_descriptor,prim_file,{#Port<0.6920>,31}},<0.404.0>,#Ref<0.0.0.1948>,<">>, <<"y(1) #Ref<0.0.0.1948>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.18.0>,#Port<0.6920>]}, {memory,2808}, {message_queue_len,0}, {reductions,267}, {trap_exit,false}]}, {<0.406.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaacd42350 (couch_file:reader_loop/3 + 216)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity 
= 0">>,<<>>, <<"0x00002aaaac523570 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) 10">>, <<"y(1) \"/opt/couchbase/var/lib/couchdb/_replicator.couch.1\"">>, <<"y(2) []">>,<<>>, <<"0x00002aaaac523590 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.404.0>]}, {memory,5816}, {message_queue_len,0}, {reductions,11762}, {trap_exit,true}]}, {<0.407.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaacd40eb8 (couch_file:writer_loop/4 + 232)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbf3370 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) 10">>,<<"y(1) 12367">>, <<"y(2) \"/opt/couchbase/var/lib/couchdb/_replicator.couch.1\"">>, <<"y(3) []">>,<<>>, <<"0x00002aaaabbf3398 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.404.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,2514}, {trap_exit,true}]}, {<0.408.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1b5cc18 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_db">>, <<"(3) {db,<0.408.0>,<0.409.0>,nil,<<16 bytes>>,<0.404.0>,<0.410.0>,{db_header,10,2,<<28 ">>, <<"y(4) <0.408.0>">>,<<"y(5) <0.403.0>">>, <<>>, <<"0x00002aaab1b5cc50 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.182.0>,<0.409.0>]}, {memory,8944}, {message_queue_len,0}, {reductions,402}, {trap_exit,true}]}, {<0.409.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc139c8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_db_updater">>, <<"(3) {db,<0.408.0>,<0.409.0>,nil,<<16 bytes>>,<0.404.0>,<0.410.0>,{db_header,10,2,<<28 ">>, <<"y(4) <0.409.0>">>,<<"y(5) <0.408.0>">>, <<>>, <<"0x00002aaaabc13a00 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.408.0>]}, {memory,8904}, {message_queue_len,0}, {reductions,1588}, {trap_exit,true}]}, {<0.410.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 
264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39487c570 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_ref_counter">>, <<"(3) {srv,{dict,2,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},{{[],">>, <<"y(4) <0.410.0>">>,<<"y(5) <0.410.0>">>, <<>>, <<"0x00002ac39487c5a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.404.0>]}, {memory,4096}, {message_queue_len,0}, {reductions,767}, {trap_exit,false}]}, {<0.411.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaaf651278 (couch_changes:wait_db_updated/3 + 56)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"x00002aaaabc5ab60 Return addr 0x00002aaaaf64ffc0 (couch_changes:keep_sending_changes/9 + 8">>, <<"y(0) ok">>, <<"y(1) #Fun">>, <<"y(2) infinity">>,<<>>, <<"x00002aaaabc5ab80 Return addr 0x00002aaaaf653258 (couch_changes:'-handle_changes/3-fun-1-'">>, <<"y(0) 999999999999998">>,<<"y(1) ok">>, <<"y(2) <<0 bytes>>">>,<<"y(3) 2">>, <<"y(4) #Fun">>, <<"y(5) infinity">>, <<"(6) {db,<0.408.0>,<0.409.0>,nil,<<16 bytes>>,<0.404.0>,<0.410.0>,{db_header,10,0,nil,n">>, <<"y(7) #Fun">>, <<"(8) {changes_args,\"continuous\",fwd,0,1000000000000000,main_only,undefined,infinity,[],">>, <<"y(9) [sys_db]">>,<<"y(10) []">>, <<"y(11) \"continuous\"">>,<<>>, <<"x00002aaaabc5abe8 Return addr 0x00002aaaaf6468c0 (couch_replication_manager:'-changes_feed">>, <<"y(0) Catch 0x00002aaaaf6532c0 (couch_changes:'-handle_changes/3-fun-1-'/5 + 800)">>, <<"y(1) []">>,<<"y(2) []">>, <<"y(3) []">>,<<"y(4) []">>, <<"y(5) []">>,<<"y(6) <0.413.0>">>,<<>>, <<"0x00002aaaabc5ac28 Return addr 0x0000000000875c98 ()">>, <<"(0) {db,<0.408.0>,<0.409.0>,nil,<<16 bytes>>,<0.404.0>,<0.410.0>,{db_header,10,0,nil,n">>, <<"y(1) []">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.400.0>,<0.413.0>]}, {memory,8768}, {message_queue_len,0}, {reductions,1357}, {trap_exit,false}]}, {<0.412.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafd04f30 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_event_sup">>, <<"y(3) {couch_db_update,{couch_db_update_notifier,#Ref<0.0.0.2009>}}">>, <<"y(4) <0.412.0>">>,<<"y(5) <0.400.0>">>, <<>>, <<"0x00002aaaafd04f68 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.400.0>,<0.184.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,32}, {trap_exit,false}]}, {<0.413.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 
0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae6538 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_event_sup">>, <<"y(3) {couch_db_update,{couch_db_update_notifier,#Ref<0.0.0.2019>}}">>, <<"y(4) <0.413.0>">>,<<"y(5) <0.411.0>">>, <<>>, <<"0x00002aaaacae6570 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.411.0>,<0.184.0>]}, {memory,3992}, {message_queue_len,0}, {reductions,64}, {trap_exit,false}]}, {<0.414.0>, [{registered_name,ns_bucket_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaab84e678 (supervisor:shutdown/2 + 992)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd8ec20 Return addr 0x00002aaaab84e110 (supervisor:do_terminate/2 + 264)">>, <<"y(0) infinity">>,<<"y(1) <0.23201.0>">>, <<>>, <<"0x00002aaaabd8ec38 Return addr 0x00002aaaab847d50 (supervisor:handle_call/3 + 720)">>, <<"y(0) {local,ns_bucket_sup}">>, <<"y(1) permanent">>, <<"(2) {child,<0.23201.0>,{per_bucket_sup,\"default\"},{single_bucket_sup,start_link,[\"defa">>, <<>>, <<"0x00002aaaabd8ec58 Return addr 0x00002ac3947f2340 (gen_server:handle_msg/5 + 272)">>, <<"(0) {state,{local,ns_bucket_sup},one_for_one,[{child,<0.23201.0>,{per_bucket_sup,\"defa">>, <<"y(1) one_for_one">>, <<"(2) {child,<0.23201.0>,{per_bucket_sup,\"default\"},{single_bucket_sup,start_link,[\"defa">>, <<>>, <<"0x00002aaaabd8ec78 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) supervisor">>, <<"(1) {state,{local,ns_bucket_sup},one_for_one,[{child,<0.23201.0>,{per_bucket_sup,\"defa">>, <<"y(2) ns_bucket_sup">>, <<"y(3) <0.322.0>">>, <<"y(4) {terminate_child,{per_bucket_sup,\"default\"}}">>, <<"y(5) {<0.398.0>,#Ref<0.0.2.75902>}">>, <<"y(6) Catch 0x00002ac3947f2340 (gen_server:handle_msg/5 + 272)">>, <<>>, <<"0x00002aaaabd8ecb8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.415.0>,<0.322.0>]}, {memory,4136}, {message_queue_len,0}, {reductions,5402}, {trap_exit,true}]}, {<0.415.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394833168 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.2022>}">>, <<"y(2) <0.414.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002ac394833190 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.414.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.416.0>, [{registered_name,system_stats_collector}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, 
{backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab29579d0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) system_stats_collector">>, <<"(3) {state,#Port<0.7040>,[{cpu_local_ms,5161337010},{cpu_idle_ms,4566607790},{swap_tot">>, <<"y(4) system_stats_collector">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaab2957a08 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.322.0>,<0.417.0>,#Port<0.7040>]}, {memory,4032}, {message_queue_len,0}, {reductions,46720}, {trap_exit,false}]}, {<0.417.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaac51be58 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.2042>}">>, <<"y(2) <0.416.0>">>, <<"y(3) ns_tick_event">>,<<>>, <<"0x00002aaaac51be80 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.416.0>,<0.339.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.418.0>, [{registered_name,'stats_archiver-@system'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394829b30 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) stats_archiver">>, <<"y(3) {state,\"@system\"}">>, <<"y(4) 'stats_archiver-@system'">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002ac394829b68 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.322.0>,<0.419.0>,<0.56.0>]}, {memory,5896}, {message_queue_len,0}, {reductions,3515921}, {trap_exit,false}]}, {<0.419.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39482a718 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.2070>}">>, <<"y(2) <0.418.0>">>, <<"y(3) ns_stats_event">>,<<>>, <<"0x00002ac39482a740 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.418.0>,<0.360.0>]}, {memory,2840}, {message_queue_len,0}, 
{reductions,21}, {trap_exit,true}]}, {<0.420.0>, [{registered_name,'stats_reader-@system'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcdf138 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) stats_reader">>, <<"y(3) {state,\"@system\"}">>, <<"y(4) 'stats_reader-@system'">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaaafcdf170 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.322.0>]}, {memory,5816}, {message_queue_len,0}, {reductions,32518}, {trap_exit,false}]}, {<0.421.0>, [{registered_name,ns_moxi_sup_work_queue}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab374d870 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) work_queue">>,<<"y(3) []">>, <<"y(4) ns_moxi_sup_work_queue">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaab374d8a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.322.0>]}, {memory,601136}, {message_queue_len,0}, {reductions,394872}, {trap_exit,false}]}, {<0.422.0>, [{registered_name,ns_moxi_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc58910 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"y(3) {state,{local,ns_moxi_sup},one_for_one,[],undefined,20,10,[],ns_moxi_sup,[]}">>, <<"y(4) ns_moxi_sup">>,<<"y(5) <0.322.0>">>, <<>>, <<"0x00002aaaabc58948 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.322.0>,<0.423.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,3519}, {trap_exit,true}]}, {<0.423.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae6ca0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.0.2081>}">>, <<"y(2) <0.422.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002aaaacae6cc8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, 
{heap_size,233}, {total_heap_size,233}, {links,[<0.422.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.426.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabcaa800 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor_cushion">>, <<"y(3) {state,couchbase_compaction_daemon,3000,{1334,107153,199014},<0.429.0>}">>, <<"y(4) <0.426.0>">>,<<"y(5) <0.322.0>">>, <<>>, <<"0x00002aaaabcaa838 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.322.0>,<0.429.0>]}, {memory,5856}, {message_queue_len,0}, {reductions,817}, {trap_exit,true}]}, {<0.429.0>, [{registered_name,couchbase_compaction_daemon}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacaee810 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couchbase_compaction_daemon">>, <<"y(3) {state,<0.430.0>}">>, <<"y(4) couchbase_compaction_daemon">>, <<"y(5) <0.426.0>">>,<<>>, <<"0x00002aaaacaee848 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.426.0>,<0.430.0>]}, {memory,13824}, {message_queue_len,0}, {reductions,3630}, {trap_exit,true}]}, {<0.430.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,3}}, {backtrace, [<<"Program counter: 0x00002aaaac142090 (timer:sleep/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"x00002aaab334b8e0 Return addr 0x00002aaaaf6658a8 (couchbase_compaction_daemon:compact_loop">>, <<"y(0) 30000">>,<<>>, <<"0x00002aaab334b8f0 Return addr 0x00002ac394787e40 (proc_lib:init_p/3 + 688)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) <0.429.0>">>,<<>>, <<"0x00002aaab334b910 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>, <<"y(1) Catch 0x00002ac394787e60 (proc_lib:init_p/3 + 720)">>, <<"y(2) []">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,46368}, {total_heap_size,46368}, {links,[<0.429.0>]}, {memory,371880}, {message_queue_len,0}, {reductions,74185}, {trap_exit,false}]}, {<0.474.0>, [{registered_name,inet_gethost_native_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394844e98 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor_bridge">>, <<"y(3) {state,inet_gethost_native,<0.475.0>,<0.475.0>,{local,inet_gethost_native_sup}}">>, <<"y(4) inet_gethost_native_sup">>, <<"y(5) <0.25.0>">>,<<>>, 
<<"0x00002ac394844ed0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.25.0>,<0.475.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,41}, {trap_exit,true}]}, {<0.475.0>, [{registered_name,inet_gethost_native}, {status,waiting}, {initial_call,{inet_gethost_native,server_init,2}}, {backtrace, [<<"Program counter: 0x00002aaaaf679020 (inet_gethost_native:main_loop/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2954c70 Return addr 0x0000000000875c98 ()">>, <<"y(0) {state,#Port<0.7089>,8000,139340,143444,<0.474.0>,4,{statistics,0,0,0,0,0,0,0,0}}">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.474.0>,#Port<0.7089>]}, {memory,2840}, {message_queue_len,0}, {reductions,33929}, {trap_exit,true}]}, {<0.563.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab6ec518 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 13526">>,<<"y(1) #Port<0.7202>">>, <<>>, <<"0x00002aaaab6ec530 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.565.0>">>, <<"y(1) #Port<0.7202>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.565.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,38770}, {trap_exit,false}]}, {<0.564.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc56a20 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 380">>,<<"y(1) #Port<0.7224>">>,<<>>, <<"0x00002aaaabc56a38 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.566.0>">>, <<"y(1) #Port<0.7224>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.566.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,91}, {trap_exit,false}]}, {<0.565.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaad0b34d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.568.0>,nil,[],#Por">>, <<"y(4) processing">>,<<"y(5) <0.565.0>">>, <<"y(6) <0.563.0>">>,<<>>, <<"0x00002aaaad0b3518 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, 
{total_heap_size,987}, {links,[<0.563.0>,<0.568.0>,#Port<0.7202>]}, {memory,8912}, {message_queue_len,0}, {reductions,56639}, {trap_exit,false}]}, {<0.566.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae73e0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.569.0>,nil,[],#Por">>, <<"y(4) processing">>,<<"y(5) <0.566.0>">>, <<"y(6) <0.564.0>">>,<<>>, <<"0x00002aaaacae7420 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.564.0>,<0.569.0>,#Port<0.7224>]}, {memory,2880}, {message_queue_len,0}, {reductions,59}, {trap_exit,false}]}, {<0.568.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafce3a98 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.568.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_sup,">>, <<"y(4) <0.568.0>">>,<<"y(5) <0.565.0>">>, <<>>, <<"0x00002aaaafce3ad0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.565.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.569.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabca2118 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.569.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_sup,">>, <<"y(4) <0.569.0>">>,<<"y(5) <0.566.0>">>, <<>>, <<"0x00002aaaabca2150 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.566.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.3685.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaca5bf48 (mochiweb_http:request/2 + 88)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab3c75878 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>, <<"y(3) #Fun">>, <<"y(4) #Port<0.9365>">>,<<>>, <<"0x00002aaab3c758a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 
88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.368.0>,#Port<0.9365>]}, {memory,601176}, {message_queue_len,0}, {reductions,2859731}, {trap_exit,false}]}, {<0.3688.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaab001ff50 (menelaus_web:handle_pool_info_wait/6 + 400)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"x00002aaab83ee0e0 Return addr 0x00002aaab001f968 (menelaus_web:check_and_handle_pool_info/">>, <<"y(0) 19995">>,<<"y(1) \"88983614\"">>, <<"y(2) \"88983614\"">>, <<"y(3) 1334107841352">>, <<"y(4) \"10.1.2.30\"">>, <<"y(5) {\"Administrator\",\"password\"}">>, <<"y(6) \"default\"">>, <<"(7) {mochiweb_request,#Port<0.9367>,'GET',\"/pools/default?waitChange=20000&etag=889836">>, <<>>, <<"0x00002aaab83ee128 Return addr 0x00002aaab001d3a8 (menelaus_web:loop/3 + 32696)">>, <<"y(0) Catch 0x00002aaab001f9b8 (menelaus_web:check_and_handle_pool_info/2 + 328)">>, <<"y(1) []">>,<<"y(2) <0.25167.0>">>,<<>>, <<"0x00002aaab83ee148 Return addr 0x00002aaaaca5c928 (mochiweb_http:headers/5 + 1176)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"(4) {mochiweb_request,#Port<0.9367>,'GET',\"/pools/default?waitChange=20000&etag=889836">>, <<"y(5) Catch 0x00002aaab001d488 (menelaus_web:loop/3 + 32920)">>, <<>>, <<"0x00002aaab83ee180 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) #Fun">>, <<"y(1) []">>,<<"y(2) []">>, <<"(3) {mochiweb_request,#Port<0.9367>,'GET',\"/pools/default?waitChange=20000&etag=889836">>, <<>>, <<"0x00002aaab83ee1a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,121393}, {total_heap_size,121393}, {links,[<0.368.0>,<0.25167.0>,#Port<0.9367>]}, {memory,972376}, {message_queue_len,0}, {reductions,3357820}, {trap_exit,false}]}, {<0.3770.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2958638 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 55345">>,<<"y(1) #Port<0.9399>">>, <<>>, <<"0x00002aaab2958650 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.3771.0>">>, <<"y(1) #Port<0.9399>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.3771.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,46748}, {trap_exit,false}]}, {<0.3771.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab04cee70 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.3772.0>,nil,[],#Po">>, <<"y(4) processing">>,<<"y(5) <0.3771.0>">>, <<"y(6) <0.3770.0>">>,<<>>, <<"0x00002aaab04ceeb0 Return addr 
0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.3770.0>,<0.3772.0>,#Port<0.9399>]}, {memory,8912}, {message_queue_len,0}, {reductions,65898}, {trap_exit,false}]}, {<0.3772.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbfa180 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.3772.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_sup">>, <<"y(4) <0.3772.0>">>,<<"y(5) <0.3771.0>">>, <<>>, <<"0x00002aaaabbfa1b8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.3771.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.3773.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39482ae78 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 41074">>,<<"y(1) #Port<0.9400>">>, <<>>, <<"0x00002ac39482ae90 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.3774.0>">>, <<"y(1) #Port<0.9400>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.3774.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,83}, {trap_exit,false}]}, {<0.3774.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacb18bd8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.3775.0>,nil,[],#Po">>, <<"y(4) processing">>,<<"y(5) <0.3774.0>">>, <<"y(6) <0.3773.0>">>,<<>>, <<"0x00002aaaacb18c18 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.3773.0>,<0.3775.0>,#Port<0.9400>]}, {memory,2880}, {message_queue_len,0}, {reductions,51}, {trap_exit,false}]}, {<0.3775.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacbf6ac0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) 
{state,{<0.3775.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_sup">>, <<"y(4) <0.3775.0>">>,<<"y(5) <0.3774.0>">>, <<>>, <<"0x00002aaaacbf6af8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.3774.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.6952.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab05afbd8 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 53140">>,<<"y(1) #Port<0.11302>">>, <<>>, <<"0x00002aaab05afbf0 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.6953.0>">>, <<"y(1) #Port<0.11302>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.6953.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,81091}, {trap_exit,false}]}, {<0.6953.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab054a420 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.6954.0>,nil,[],#Po">>, <<"y(4) processing">>,<<"y(5) <0.6953.0>">>, <<"y(6) <0.6952.0>">>,<<>>, <<"0x00002aaab054a460 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.6952.0>,<0.6954.0>,#Port<0.11302>]}, {memory,8912}, {message_queue_len,0}, {reductions,111295}, {trap_exit,false}]}, {<0.6954.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0672d78 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.6954.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_sup">>, <<"y(4) <0.6954.0>">>,<<"y(5) <0.6953.0>">>, <<>>, <<"0x00002aaab0672db0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.6953.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.6955.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394851de8 Return addr 
0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 31003">>,<<"y(1) #Port<0.11303>">>, <<>>, <<"0x00002ac394851e00 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.6956.0>">>, <<"y(1) #Port<0.11303>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.6956.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,83}, {trap_exit,false}]}, {<0.6956.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab707f98 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.6957.0>,nil,[],#Po">>, <<"y(4) processing">>,<<"y(5) <0.6956.0>">>, <<"y(6) <0.6955.0>">>,<<>>, <<"0x00002aaaab707fd8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.6955.0>,<0.6957.0>,#Port<0.11303>]}, {memory,2880}, {message_queue_len,0}, {reductions,51}, {trap_exit,false}]}, {<0.6957.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0664ec8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.6957.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_sup">>, <<"y(4) <0.6957.0>">>,<<"y(5) <0.6956.0>">>, <<>>, <<"0x00002aaab0664f00 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.6956.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.10174.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394846928 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 16769">>,<<"y(1) #Port<0.13906>">>, <<>>, <<"0x00002ac394846940 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.10175.0>">>, <<"y(1) #Port<0.13906>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.10175.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,249218}, {trap_exit,false}]}, {<0.10175.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1200118 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) 
[]">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.10176.0>,nil,[],#P">>, <<"y(4) processing">>,<<"y(5) <0.10175.0>">>, <<"y(6) <0.10174.0>">>,<<>>, <<"0x00002aaab1200158 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.10174.0>,<0.10176.0>,#Port<0.13906>]}, {memory,8912}, {message_queue_len,0}, {reductions,366567}, {trap_exit,false}]}, {<0.10176.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab6f6e08 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.10176.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_su">>, <<"y(4) <0.10176.0>">>, <<"y(5) <0.10175.0>">>,<<>>, <<"0x00002aaaab6f6e40 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.10175.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.10177.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab794030 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 30399">>,<<"y(1) #Port<0.13907>">>, <<>>, <<"0x00002aaaab794048 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.10178.0>">>, <<"y(1) #Port<0.13907>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.10178.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,83}, {trap_exit,false}]}, {<0.10178.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394861318 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.10179.0>,nil,[],#P">>, <<"y(4) processing">>,<<"y(5) <0.10178.0>">>, <<"y(6) <0.10177.0>">>,<<>>, <<"0x00002ac394861358 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.10177.0>,<0.10179.0>,#Port<0.13907>]}, {memory,2880}, {message_queue_len,0}, {reductions,51}, {trap_exit,false}]}, {<0.10179.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 
264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab6efe40 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.10179.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_su">>, <<"y(4) <0.10179.0>">>, <<"y(5) <0.10178.0>">>,<<>>, <<"0x00002aaaab6efe78 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.10178.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.12857.0>, [{registered_name,net_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab17ea8a0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,net_sup},one_for_all,[{child,<0.12865.0>,net_kernel,{net_kernel,star">>, <<"y(4) net_sup">>,<<"y(5) <0.11.0>">>,<<>>, <<"0x00002aaab17ea8d8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.12862.0>,<0.12863.0>,<0.12865.0>,<0.11.0>]}, {memory,5936}, {message_queue_len,0}, {reductions,293}, {trap_exit,true}]}, {<0.12862.0>, [{registered_name,erl_epmd}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab13a9328 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) erl_epmd">>, <<"y(3) {state,#Port<0.14821>,21100,ns_1}">>, <<"y(4) erl_epmd">>,<<"y(5) <0.12857.0>">>, <<>>, <<"0x00002aaab13a9360 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12857.0>,#Port<0.14821>]}, {memory,2840}, {message_queue_len,0}, {reductions,128}, {trap_exit,false}]}, {<0.12863.0>, [{registered_name,auth}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc57160 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) auth">>, <<"y(3) {state,olcyvmepmlevmwcj,3682359}">>, <<"y(4) auth">>,<<"y(5) <0.12857.0>">>,<<>>, <<"0x00002aaaabc57198 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12857.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,437}, 
{trap_exit,true}]}, {<0.12865.0>, [{registered_name,net_kernel}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabcaaf58 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) net_kernel">>, <<"(3) {state,'ns_1@10.1.2.30','ns_1@10.1.2.30',longnames,{tick,<0.12883.0>,15000},7000,s">>, <<"y(4) net_kernel">>,<<"y(5) <0.12857.0>">>, <<>>, <<"0x00002aaaabcaaf90 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links, [<0.12857.0>,<0.12883.0>,<0.12880.0>,#Port<0.14819>]}, {memory,2960}, {message_queue_len,0}, {reductions,3135}, {trap_exit,true}]}, {<0.12869.0>, [{registered_name,xdc_rdoc_replication_srv}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab71aef0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) cb_generic_replication_srv">>, <<"(3) {state,xdc_rdoc_replication_srv,ok,xdc_rdoc_replication_srv,[],[{<<24 bytes>>,{0,<">>, <<"y(4) xdc_rdoc_replication_srv">>, <<"y(5) <0.322.0>">>,<<>>, <<"0x00002aaaab71af28 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.322.0>,<0.12885.0>]}, {memory,8872}, {message_queue_len,0}, {reductions,19217}, {trap_exit,false}]}, {<0.12880.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{inet_tcp_dist,accept_loop,2}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacaeb1e8 Return addr 0x00002aaaac9f3458 (inet_tcp:accept/1 + 40)">>, <<"y(0) 55259">>,<<"y(1) #Port<0.14819>">>, <<>>, <<"0x00002aaaacaeb200 Return addr 0x00002aaaaca8e548 (inet_tcp_dist:accept_loop/2 + 96)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacaeb210 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) #Port<0.14819>">>, <<"y(2) <0.12865.0>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12865.0>]}, {memory,2768}, {message_queue_len,0}, {reductions,1827}, {trap_exit,false}]}, {<0.12883.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{net_kernel,ticker,2}}, {backtrace, [<<"Program counter: 0x00002aaaab916ce0 (net_kernel:ticker_loop/2 + 56)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1b677f8 Return addr 0x0000000000875c98 ()">>, <<"y(0) 15000">>,<<"y(1) <0.12865.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12865.0>]}, {memory,2696}, {message_queue_len,0}, {reductions,65}, 
{trap_exit,false}]}, {<0.12885.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab05481d8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.1.5778>}">>, <<"y(2) <0.12869.0>">>, <<"y(3) ns_node_disco_events">>,<<>>, <<"0x00002aaab0548200 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12869.0>,<0.326.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.12916.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947faac8 (application_master:main_loop/2 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab05470a8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,<0.12917.0>,{appl_data,mnesia,[mnesia_dumper_load_regulator,mnesia_event,mn">>, <<"y(2) <0.7.0>">>,<<>>, <<"0x00002aaab05470c8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.7.0>,<0.12917.0>]}, {memory,8872}, {message_queue_len,0}, {reductions,136}, {trap_exit,true}]}, {<0.12917.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{application_master,start_it,4}}, {backtrace, [<<"Program counter: 0x00002ac3947fcdd8 (application_master:loop_it/4 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaad06a6e8 Return addr 0x0000000000875c98 ()">>, <<"y(0) {normal,[]}">>,<<"y(1) mnesia_sup">>, <<"y(2) <0.12918.0>">>, <<"y(3) <0.12916.0>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12916.0>,<0.12918.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,32}, {trap_exit,true}]}, {<0.12918.0>, [{registered_name,mnesia_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab6f47c8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mnesia_sup},one_for_all,[{child,<0.12920.0>,mnesia_kernel_sup,{mnesi">>, <<"y(4) mnesia_sup">>,<<"y(5) <0.12917.0>">>, <<>>, <<"0x00002aaaab6f4800 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.12919.0>,<0.12920.0>,<0.12917.0>]}, {memory,8912}, {message_queue_len,0}, {reductions,253}, {trap_exit,true}]}, {<0.12919.0>, [{registered_name,mnesia_event}, {status,waiting}, 
{initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947735f0 (gen_event:fetch_msg/5 + 72)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafd15c40 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) false">>,<<"y(1) []">>, <<"y(2) [{handler,mnesia_event,false,{state,[],false,[]},false}]">>, <<"y(3) mnesia_event">>, <<"y(4) <0.12918.0>">>,<<>>, <<"0x00002aaaafd15c70 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.12918.0>,<0.12922.0>]}, {memory,5856}, {message_queue_len,0}, {reductions,503}, {trap_exit,true}]}, {<0.12920.0>, [{registered_name,mnesia_kernel_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc0ab38 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mnesia_kernel_sup},one_for_all,[{child,<0.12997.0>,mnesia_late_loade">>, <<"y(4) mnesia_kernel_sup">>, <<"y(5) <0.12918.0>">>,<<>>, <<"0x00002aaaabc0ab70 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links, [<0.12923.0>,<0.12995.0>,<0.12996.0>,<0.12997.0>, <0.12925.0>,<0.12994.0>,<0.12924.0>,<0.12921.0>, <0.12922.0>,<0.12918.0>]}, {memory,6176}, {message_queue_len,0}, {reductions,608}, {trap_exit,true}]}, {<0.12921.0>, [{registered_name,mnesia_monitor}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafce1020 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mnesia_monitor">>, <<"y(3) {state,<0.12920.0>,[],[],true,[],undefined,[]}">>, <<"y(4) mnesia_monitor">>, <<"y(5) <0.12920.0>">>,<<>>, <<"0x00002aaaafce1058 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.12992.0>,<0.12920.0>]}, {memory,5856}, {message_queue_len,0}, {reductions,8256}, {trap_exit,true}]}, {<0.12922.0>, [{registered_name,mnesia_subscr}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394844740 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mnesia_subscr">>, <<"y(3) {state,<0.12920.0>,3686465}">>, <<"y(4) mnesia_subscr">>, <<"y(5) <0.12920.0>">>,<<>>, <<"0x00002ac394844778 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, 
{garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12919.0>,<0.12920.0>,<0.258.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,111}, {trap_exit,true}]}, {<0.12923.0>, [{registered_name,mnesia_locker}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaace4bf88 (mnesia_locker:loop/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2956dc8 Return addr 0x00002aaaace2fc38 (mnesia_sp:init_proc/4 + 240)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"y(4) []">>,<<"y(5) {state,<0.12920.0>}">>, <<>>, <<"0x00002aaab2956e00 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) Catch 0x00002aaaace2fc38 (mnesia_sp:init_proc/4 + 240)">>, <<"y(1) mnesia_locker">>,<<"y(2) []">>, <<"y(3) []">>,<<"y(4) [<0.12920.0>]">>,<<>>, <<"0x00002aaab2956e30 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12920.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,99804}, {trap_exit,true}]}, {<0.12924.0>, [{registered_name,mnesia_recover}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafce2350 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mnesia_recover">>, <<"y(3) {state,<0.12920.0>,undefined,undefined,undefined,0,false,true,[]}">>, <<"y(4) mnesia_recover">>, <<"y(5) <0.12920.0>">>,<<>>, <<"0x00002aaaafce2388 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12920.0>,<0.56.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,2855}, {trap_exit,true}]}, {<0.12925.0>, [{registered_name,mnesia_tm}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaafd188c8 (mnesia_tm:doit_loop/1 + 200)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc111e0 Return addr 0x00002aaaace2fc38 (mnesia_sp:init_proc/4 + 240)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) {state,{0,nil},{0,nil},<0.12920.0>,[],[],[]}">>, <<"y(3) []">>,<<"y(4) []">>, <<"y(5) <0.12920.0>">>,<<"y(6) {0,nil}">>, <<"y(7) {0,nil}">>,<<>>, <<"0x00002aaaabc11228 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) Catch 0x00002aaaace2fc38 (mnesia_sp:init_proc/4 + 240)">>, <<"y(1) mnesia_tm">>,<<"y(2) []">>, <<"y(3) []">>,<<"y(4) [<0.12920.0>]">>,<<>>, <<"0x00002aaaabc11258 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12920.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,268002}, {trap_exit,true}]}, {<0.12992.0>, [{registered_name,[]}, 
{status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaac15d200 (disk_log:loop/1 + 168)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483c470 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"(0) {state,[],[],<0.62.0>,<0.63.0>,278,{arg,latest_log,undefined,\"/opt/couchbase/var/l">>, <<>>, <<"0x00002ac39483c480 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.63.0>,<0.12921.0>,<0.62.0>,#Port<0.24430>]}, {memory,8952}, {message_queue_len,0}, {reductions,73259}, {trap_exit,true}]}, {<0.12994.0>, [{registered_name,mnesia_checkpoint_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab77ad68 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mnesia_checkpoint_sup},simple_one_for_one,[{child,undefined,mnesia_c">>, <<"y(4) mnesia_checkpoint_sup">>, <<"y(5) <0.12920.0>">>,<<>>, <<"0x00002aaaab77ada0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12920.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,59}, {trap_exit,true}]}, {<0.12995.0>, [{registered_name,mnesia_snmp_sup}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab10eec78 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{local,mnesia_snmp_sup},simple_one_for_one,[{child,undefined,mnesia_snmp_su">>, <<"y(4) mnesia_snmp_sup">>, <<"y(5) <0.12920.0>">>,<<>>, <<"0x00002aaab10eecb0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.12920.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,59}, {trap_exit,true}]}, {<0.12996.0>, [{registered_name,mnesia_controller}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1aa7558 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mnesia_controller">>, <<"(3) {state,<0.12920.0>,true,[],[],{0,nil},[],[],{0,nil},undefined,[],[],{interval,#Ref">>, <<"y(4) mnesia_controller">>, <<"y(5) <0.12920.0>">>,<<>>, <<"0x00002aaab1aa7590 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, 
[{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.12920.0>,<0.56.0>]}, {memory,5856}, {message_queue_len,0}, {reductions,4333}, {trap_exit,true}]}, {<0.12997.0>, [{registered_name,mnesia_late_loader}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaafd3de48 (mnesia_late_loader:loop/1 + 40)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0541f98 Return addr 0x00002aaaace2fc38 (mnesia_sp:init_proc/4 + 240)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) {state,<0.12920.0>}">>,<<>>, <<"0x00002aaab0541fb8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) Catch 0x00002aaaace2fc38 (mnesia_sp:init_proc/4 + 240)">>, <<"y(1) mnesia_late_loader">>,<<"y(2) []">>, <<"y(3) []">>,<<"y(4) [<0.12920.0>]">>,<<>>, <<"0x00002aaab0541fe8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,1597}, {total_heap_size,1597}, {links,[<0.12920.0>]}, {memory,13712}, {message_queue_len,0}, {reductions,1111}, {trap_exit,false}]}, {<0.13096.0>, [{registered_name,ns_doctor}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab3494870 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_doctor">>, <<"(3) {state,{dict,6,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},{{[">>, <<"y(4) ns_doctor">>,<<"y(5) <0.322.0>">>, <<>>, <<"0x00002aaab34948a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.322.0>,<0.13097.0>,<0.56.0>]}, {memory,601216}, {message_queue_len,0}, {reductions,1403068}, {trap_exit,false}]}, {<0.13097.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaff854f8 (ns_pubsub:do_subscribe_link/4 + 392)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaad06ae30 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"y(1) {ns_pubsub,#Ref<0.0.1.10057>}">>, <<"y(2) <0.13096.0>">>, <<"y(3) ns_config_events">>,<<>>, <<"0x00002aaaad06ae58 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.13096.0>,<0.307.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,21}, {trap_exit,true}]}, {<0.22443.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaab0022bf0 (menelaus_web:handle_streaming/4 + 440)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab3a999a8 Return addr 0x00002aaab001d408 (menelaus_web:loop/3 + 32792)">>, <<"(0) 
{mochiweb_response,{mochiweb_request,#Port<0.23947>,'GET',\"/pools/default/saslBuck">>, <<"(1) {mochiweb_request,#Port<0.23947>,'GET',\"/pools/default/saslBucketsStreaming\",{1,1}">>, <<"y(2) #Fun">>, <<"(3) {struct,[{buckets,[{struct,[{name,<<7 bytes>>},{nodeLocator,vbucket},{saslPassword">>, <<>>, <<"0x00002aaab3a999d0 Return addr 0x00002aaaaca5c928 (mochiweb_http:headers/5 + 1176)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"(4) {mochiweb_request,#Port<0.23947>,'GET',\"/pools/default/saslBucketsStreaming\",{1,1}">>, <<"y(5) Catch 0x00002aaab001d488 (menelaus_web:loop/3 + 32920)">>, <<>>, <<"0x00002aaab3a99a08 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) #Fun">>, <<"y(1) []">>,<<"y(2) []">>, <<"(3) {mochiweb_request,#Port<0.23947>,'GET',\"/pools/default/saslBucketsStreaming\",{1,1}">>, <<>>, <<"0x00002aaab3a99a30 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,196418}, {total_heap_size,196418}, {links,[<0.368.0>,#Port<0.23947>]}, {memory,1572536}, {message_queue_len,0}, {reductions,150588}, {trap_exit,false}]}, {<0.23025.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd6c4a8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor_cushion">>, <<"y(3) {state,moxi,5000,{1334,107513,384005},<0.23026.0>}">>, <<"y(4) <0.23025.0>">>,<<"y(5) <0.390.0>">>, <<>>, <<"0x00002aaaabd6c4e0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.390.0>,<0.23026.0>]}, {memory,21648}, {message_queue_len,0}, {reductions,1075}, {trap_exit,true}]}, {<0.23026.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab17adcd8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) ns_port_server">>, <<"(3) {state,#Port<0.23945>,moxi,{[\"})\",\"\t}\",\"\t\t\\\"vBucketMap\\\":\t[]\",\"\t\t\\\"serverList\\\":\t[">>, <<"y(4) <0.23026.0>">>, <<"y(5) <0.23025.0>">>,<<>>, <<"0x00002aaab17add10 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,6765}, {total_heap_size,6765}, {links,[<0.23025.0>,#Port<0.23945>]}, {memory,55096}, {message_queue_len,0}, {reductions,4706}, {trap_exit,true}]}, {<0.23201.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaaf6a2e38 (single_bucket_sup:top_loop/3 + 64)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab6f66d0 Return addr 0x0000000000875c98 ()">>, <<"y(0) 
[]">>,<<"y(1) \"default\"">>, <<"y(2) <0.23202.0>">>,<<"y(3) <0.414.0>">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.23202.0>]}, {memory,8800}, {message_queue_len,0}, {reductions,930}, {trap_exit,true}]}, {<0.23202.0>, [{registered_name,'single_bucket_sup-default'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaab84e678 (supervisor:shutdown/2 + 992)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd10f10 Return addr 0x00002aaaab84e110 (supervisor:do_terminate/2 + 264)">>, <<"y(0) infinity">>,<<"y(1) <0.23203.0>">>, <<>>, <<"0x00002aaaabd10f28 Return addr 0x00002aaaab84df80 (supervisor:terminate_children/3 + 376)">>, <<"y(0) {local,'single_bucket_sup-default'}">>, <<"y(1) permanent">>, <<"(2) {child,<0.23203.0>,{ns_memcached_sup,\"default\"},{ns_memcached_sup,start_link,[\"def">>, <<>>, <<"0x00002aaaabd10f48 Return addr 0x00002ac3947f44b0 (gen_server:terminate/6 + 184)">>, <<"(0) [{child,undefined,{capi_ddoc_replication_srv,\"default\"},{capi_ddoc_replication_srv">>, <<"y(1) {local,'single_bucket_sup-default'}">>, <<"y(2) []">>,<<>>, <<"0x00002aaaabd10f68 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>, <<"(1) {state,{local,'single_bucket_sup-default'},one_for_one,[{child,<0.23238.0>,{capi_s">>, <<"y(2) supervisor">>, <<"y(3) {'EXIT',<0.23201.0>,shutdown}">>, <<"y(4) 'single_bucket_sup-default'">>, <<"y(5) shutdown">>, <<"y(6) Catch 0x00002ac3947f44b0 (gen_server:terminate/6 + 184)">>, <<>>, <<"0x00002aaaabd10fa8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,28657}, {total_heap_size,28657}, {links,[<0.23201.0>]}, {memory,230264}, {message_queue_len,0}, {reductions,3095}, {trap_exit,true}]}, {<0.23203.0>, [{registered_name,'ns_memcached_sup-default'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394788890 (proc_lib:sync_wait/2 + 56)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39484e400 Return addr 0x00002aaaab847368 (supervisor:do_start_child/2 + 312)">>, <<"y(0) infinity">>,<<"y(1) <0.24861.0>">>, <<>>, <<"0x00002ac39484e418 Return addr 0x00002aaaab846e00 (supervisor:start_children/3 + 144)">>, <<"(0) {child,undefined,{ns_memcached,stats,\"default\"},{ns_memcached,start_link,[{\"defaul">>, <<"y(1) {local,'ns_memcached_sup-default'}">>, <<"y(2) Catch 0x00002aaaab847368 (supervisor:do_start_child/2 + 312)">>, <<>>, <<"0x00002ac39484e438 Return addr 0x00002aaaab84d3a8 (supervisor:restart/3 + 2008)">>, <<"y(0) {local,'ns_memcached_sup-default'}">>, <<"y(1) []">>, <<"(2) [{child,undefined,{ns_memcached,data,\"default\"},{ns_memcached,start_link,[{\"defaul">>, <<"(3) {child,undefined,{ns_memcached,stats,\"default\"},{ns_memcached,start_link,[{\"defaul">>, <<>>, <<"0x00002ac39484e460 Return addr 0x00002aaaab84a590 (supervisor:handle_info/2 + 136)">>, <<"y(0) []">>, <<"(1) {state,{local,'ns_memcached_sup-default'},rest_for_one,[{child,undefined,{failover">>, <<"(2) {child,<0.23205.0>,{ns_memcached,stats,\"default\"},{ns_memcached,start_link,[{\"defa">>, <<"y(3) []">>,<<>>, <<"0x00002ac39484e488 Return addr 0x00002ac3947f28c0 
(gen_server:handle_msg/5 + 1680)">>, <<>>, <<"0x00002ac39484e490 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) supervisor">>, <<"(1) {state,{local,'ns_memcached_sup-default'},rest_for_one,[{child,<0.23223.0>,{failov">>, <<"y(2) 'ns_memcached_sup-default'">>, <<"y(3) <0.23202.0>">>, <<"(4) {'EXIT',<0.23205.0>,{{badmatch,{error,timeout}},[{mc_client_binary,stats_recv,4,[{">>, <<"y(5) Catch 0x00002ac3947f28c0 (gen_server:handle_msg/5 + 1680)">>, <<>>, <<"0x00002ac39484e4c8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,2584}, {total_heap_size,2584}, {links,[<0.24861.0>]}, {memory,21872}, {message_queue_len,6}, {reductions,2175}, {trap_exit,true}]}, {<0.23206.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948530e8 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 64902">>,<<"y(1) #Port<0.24063>">>, <<>>, <<"0x00002ac394853100 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.23207.0>">>, <<"y(1) #Port<0.24063>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.23207.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,83}, {trap_exit,false}]}, {<0.23207.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab712890 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.23208.0>,nil,[],#P">>, <<"y(4) processing">>,<<"y(5) <0.23207.0>">>, <<"y(6) <0.23206.0>">>,<<>>, <<"0x00002aaaab7128d0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.23206.0>,<0.23208.0>,#Port<0.24063>]}, {memory,2880}, {message_queue_len,0}, {reductions,51}, {trap_exit,false}]}, {<0.23208.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab6eb928 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.23208.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_su">>, <<"y(4) <0.23208.0>">>, <<"y(5) <0.23207.0>">>,<<>>, <<"0x00002aaaab6eb960 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.23207.0>]}, 
{memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.23209.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{mc_connection,init,1}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab7a1d08 Return addr 0x00002aaaaf6bff78 (mc_connection:loop/2 + 88)">>, <<"y(0) 64906">>,<<"y(1) #Port<0.24065>">>, <<>>, <<"0x00002aaaab7a1d20 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.23210.0>">>, <<"y(1) #Port<0.24065>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.215.0>,<0.23210.0>]}, {memory,2736}, {message_queue_len,0}, {reductions,83}, {trap_exit,false}]}, {<0.23210.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaffc2598 (gen_fsm:loop/7 + 280)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabd6cbf8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) mc_daemon">>, <<"(3) {state,<<7 bytes>>,true,0,nil,[],undefined,undefined,[],[],4,<0.23211.0>,nil,[],#P">>, <<"y(4) processing">>,<<"y(5) <0.23210.0>">>, <<"y(6) <0.23209.0>">>,<<>>, <<"0x00002aaaabd6cc38 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.23209.0>,<0.23211.0>,#Port<0.24065>]}, {memory,2880}, {message_queue_len,0}, {reductions,51}, {trap_exit,false}]}, {<0.23211.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaab7086f8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) supervisor">>, <<"(3) {state,{<0.23211.0>,mc_batch_sup},simple_one_for_one,[{child,undefined,mc_batch_su">>, <<"y(4) <0.23211.0>">>, <<"y(5) <0.23210.0>">>,<<>>, <<"0x00002aaaab708730 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.23210.0>]}, {memory,2800}, {message_queue_len,0}, {reductions,48}, {trap_exit,true}]}, {<0.23228.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1830c50 Return addr 0x00002aaaacd3c328 (couch_file:init/1 + 1032)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_file">>, <<"y(3) {file,<0.23230.0>,<0.23231.0>,34}">>, <<"y(4) <0.23228.0>">>, <<"y(5) <0.23227.0>">>,<<>>, <<"0x00002aaab1830c88 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"y(4) \"/opt/couchbase/var/lib/couchdb/default/master.couch.1\"">>, <<"y(5) Catch 0x00002aaaacd3c348 (couch_file:init/1 + 
1064)">>, <<>>, <<"0x00002aaab1830cc0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.23231.0>,<0.23235.0>,<0.23230.0>]}, {memory,9056}, {message_queue_len,0}, {reductions,438}, {trap_exit,true}]}, {<0.23229.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaab9ae838 (file_io_server:server_loop/1 + 152)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394880d50 Return addr 0x0000000000875c98 ()">>, <<"(0) {state,{file_descriptor,prim_file,{#Port<0.24069>,49}},<0.23228.0>,#Ref<0.0.2.4997">>, <<"y(1) #Ref<0.0.2.49973>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.18.0>,#Port<0.24069>]}, {memory,2808}, {message_queue_len,0}, {reductions,268}, {trap_exit,false}]}, {<0.23230.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaacd42350 (couch_file:reader_loop/3 + 216)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1b68f90 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) 10">>, <<"y(1) \"/opt/couchbase/var/lib/couchdb/default/master.couch.1\"">>, <<"y(2) []">>,<<>>, <<"0x00002aaab1b68fb0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.23228.0>]}, {memory,3952}, {message_queue_len,0}, {reductions,346}, {trap_exit,true}]}, {<0.23231.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaacd40eb8 (couch_file:writer_loop/4 + 232)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1831870 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) 10">>,<<"y(1) 34">>, <<"y(2) \"/opt/couchbase/var/lib/couchdb/default/master.couch.1\"">>, <<"y(3) []">>,<<>>, <<"0x00002aaab1831898 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.23228.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,712}, {trap_exit,true}]}, {<0.23232.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"rogram counter: 0x00002aaaaf6f1710 (couch_stats_collector:'-track_process_count/2-fun-0-'/">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1837f48 Return addr 0x0000000000875c98 ()">>, <<"y(0) #Ref<0.0.2.49982>">>, <<"y(1) {couchdb,open_os_files}">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[]}, {memory,2728}, {message_queue_len,0}, {reductions,3}, 
{trap_exit,false}]}, {<0.23233.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394868db8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_db">>, <<"(3) {db,<0.23233.0>,<0.23234.0>,nil,<<16 bytes>>,<0.23228.0>,<0.23235.0>,{db_header,10">>, <<"y(4) <0.23233.0>">>, <<"y(5) <0.23227.0>">>,<<>>, <<"0x00002ac394868df0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,987}, {total_heap_size,987}, {links,[<0.182.0>,<0.23234.0>]}, {memory,9016}, {message_queue_len,0}, {reductions,229}, {trap_exit,true}]}, {<0.23234.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1b6b880 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_db_updater">>, <<"(3) {db,<0.23233.0>,<0.23234.0>,nil,<<16 bytes>>,<0.23228.0>,<0.23235.0>,{db_header,10">>, <<"y(4) <0.23234.0>">>, <<"y(5) <0.23233.0>">>,<<>>, <<"0x00002aaab1b6b8b8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,610}, {total_heap_size,610}, {links,[<0.23233.0>]}, {memory,5888}, {message_queue_len,0}, {reductions,1026}, {trap_exit,true}]}, {<0.23235.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1835ad0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) couch_ref_counter">>, <<"(3) {srv,{dict,2,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},{{[],">>, <<"y(4) <0.23235.0>">>, <<"y(5) <0.23235.0>">>,<<>>, <<"0x00002aaab1835b08 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.23228.0>]}, {memory,4096}, {message_queue_len,0}, {reductions,265}, {trap_exit,false}]}, {<0.23236.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"rogram counter: 0x00002aaaaf6f1710 (couch_stats_collector:'-track_process_count/2-fun-0-'/">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab1b66c50 Return addr 0x0000000000875c98 ()">>, <<"y(0) #Ref<0.0.2.49993>">>, <<"y(1) {couchdb,open_databases}">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[]}, {memory,2728}, {message_queue_len,0}, {reductions,3}, {trap_exit,false}]}, 
{<0.24585.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaab001ff50 (menelaus_web:handle_pool_info_wait/6 + 400)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"x00002aaab86940e0 Return addr 0x00002aaab001f968 (menelaus_web:check_and_handle_pool_info/">>, <<"y(0) 19994">>,<<"y(1) \"88983614\"">>, <<"y(2) \"88983614\"">>, <<"y(3) 1334107841365">>, <<"y(4) \"10.1.2.30\"">>, <<"y(5) {\"Administrator\",\"password\"}">>, <<"y(6) \"default\"">>, <<"(7) {mochiweb_request,#Port<0.24488>,'GET',\"/pools/default?waitChange=20000&etag=88983">>, <<>>, <<"0x00002aaab8694128 Return addr 0x00002aaab001d3a8 (menelaus_web:loop/3 + 32696)">>, <<"y(0) Catch 0x00002aaab001f9b8 (menelaus_web:check_and_handle_pool_info/2 + 328)">>, <<"y(1) []">>,<<"y(2) <0.25168.0>">>,<<>>, <<"0x00002aaab8694148 Return addr 0x00002aaaaca5c928 (mochiweb_http:headers/5 + 1176)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"(4) {mochiweb_request,#Port<0.24488>,'GET',\"/pools/default?waitChange=20000&etag=88983">>, <<"y(5) Catch 0x00002aaab001d488 (menelaus_web:loop/3 + 32920)">>, <<>>, <<"0x00002aaab8694180 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) #Fun">>, <<"y(1) []">>,<<"y(2) []">>, <<"(3) {mochiweb_request,#Port<0.24488>,'GET',\"/pools/default?waitChange=20000&etag=88983">>, <<>>, <<"0x00002aaab86941a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,121393}, {total_heap_size,121393}, {links,[<0.368.0>,<0.25168.0>,#Port<0.24488>]}, {memory,972376}, {message_queue_len,0}, {reductions,194880}, {trap_exit,false}]}, {<0.24618.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947f00d0 (gen_server:do_multi_call/4 + 736)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab27ab7b0 Return addr 0x00002aaaab83c850 (rpc:do_multicall/5 + 256)">>, <<"y(0) []">>,<<"y(1) #Ref<0.0.2.82016>">>, <<"y(2) <0.25197.0>">>, <<"y(3) #Ref<0.0.2.82015>">>,<<>>, <<"0x00002aaab27ab7d8 Return addr 0x00002aaaaca7fd30 (diag_handler:diag_multicall/3 + 208)">>, <<"y(0) []">>,<<"y(1) []">>,<<>>, <<"0x00002aaab27ab7f0 Return addr 0x00002aaaaca80860 (diag_handler:do_handle_diag/3 + 368)">>, <<"y(0) ['ns_1@10.1.2.30']">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>,<<>>, <<"0x00002aaab27ab818 Return addr 0x00002aaab001d068 (menelaus_web:loop/3 + 31864)">>, <<"y(0) []">>, <<"(1) [{\"default\",[{sasl_password,[]},{num_replicas,1},{replica_index,true},{ram_quota,1">>, <<"y(2) \"log\"">>, <<"(3) [{\"Content-Disposition\",[97,116,116,97,99,104,109,101,110,116,59,32,102,105,108,10">>, <<"(4) {mochiweb_request,#Port<0.24489>,'GET',\"/diag\",{1,1},{9,{\"host\",{'Host',\"10.1.2.30">>, <<>>, <<"0x00002aaab27ab848 Return addr 0x00002aaaaca5c928 (mochiweb_http:headers/5 + 1176)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"(4) {mochiweb_request,#Port<0.24489>,'GET',\"/diag\",{1,1},{9,{\"host\",{'Host',\"10.1.2.30">>, <<"y(5) Catch 0x00002aaab001d488 (menelaus_web:loop/3 + 32920)">>, <<>>, <<"0x00002aaab27ab880 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) #Fun">>, <<"y(1) []">>,<<"y(2) []">>, <<"(3) 
{mochiweb_request,#Port<0.24489>,'GET',\"/diag\",{1,1},{9,{\"host\",{'Host',\"10.1.2.30">>, <<>>, <<"0x00002aaab27ab8a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.368.0>,#Port<0.24489>]}, {memory,601320}, {message_queue_len,0}, {reductions,94788}, {trap_exit,false}]}, {<0.24630.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaca5bf48 (mochiweb_http:request/2 + 88)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2da3878 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>, <<"y(3) #Fun">>, <<"y(4) #Port<0.24490>">>,<<>>, <<"0x00002aaab2da38a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.368.0>,#Port<0.24490>]}, {memory,601176}, {message_queue_len,0}, {reductions,130043}, {trap_exit,false}]}, {<0.24699.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaca5bf48 (mochiweb_http:request/2 + 88)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2718878 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>, <<"y(3) #Fun">>, <<"y(4) #Port<0.24569>">>,<<>>, <<"0x00002aaab27188a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.368.0>,#Port<0.24569>]}, {memory,601176}, {message_queue_len,0}, {reductions,99618}, {trap_exit,false}]}, {<0.24861.0>, [{registered_name,'ns_memcached-default'}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947262d0 (prim_inet:recv0/3 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab8125780 Return addr 0x00002aaaaf6a02d0 (mc_binary:recv/3 + 168)">>, <<"y(0) 4055">>,<<"y(1) #Port<0.24693>">>, <<>>, <<"x00002aaab8125798 Return addr 0x00002aaaaf6c5ff8 (mc_client_binary:cmd_binary_vocal_recv/5">>, <<"y(0) 15000">>,<<"y(1) res">>, <<"y(2) #Port<0.24693>">>,<<>>, <<"0x00002aaab81257b8 Return addr 0x00002aaaaf6c6af0 (mc_client_binary:auth/2 + 1504)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) undefined">>,<<"y(3) undefined">>, <<"y(4) undefined">>, <<"y(5) #Port<0.24693>">>,<<"y(6) 33">>,<<>>, <<"0x00002aaab81257f8 Return addr 0x00002aaab006b638 (ns_memcached:connect/1 + 768)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>,<<>>, <<"0x00002aaab8125818 Return addr 0x00002aaab0064670 (ns_memcached:init/1 + 152)">>, <<"y(0) Catch 0x00002aaab006b6b0 (ns_memcached:connect/1 + 888)">>, <<"y(1) []">>,<<"y(2) #Port<0.24693>">>, <<"y(3) 2">>,<<>>, <<"0x00002aaab8125840 Return addr 0x00002ac3947eef18 (gen_server:init_it/6 + 376)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) stats">>,<<"y(3) 
\"default\"">>,<<>>, <<"0x00002aaab8125868 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) Catch 0x00002ac3947eef18 (gen_server:init_it/6 + 376)">>, <<"y(1) 'ns_memcached-default'">>, <<"y(2) []">>,<<"y(3) ns_memcached">>, <<"y(4) {local,'ns_memcached-default'}">>, <<"y(5) <0.23203.0>">>, <<"y(6) <0.23203.0>">>,<<>>, <<"0x00002aaab81258a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links, [#Port<0.24626>,#Port<0.24693>,<0.23203.0>, #Port<0.24659>,#Port<0.24571>]}, {memory,602088}, {message_queue_len,18}, {reductions,8009}, {trap_exit,true}]}, {<0.24894.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac39476b390 (gen:do_call/4 + 576)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab2c7d7b8 Return addr 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<"y(0) #Ref<0.0.2.80207>">>, <<"y(1) 'ns_1@10.1.2.30'">>,<<"y(2) []">>, <<"y(3) 30000">>, <<"y(4) list_vbuckets_prevstate">>, <<"y(5) '$gen_call'">>, <<"y(6) <0.24861.0>">>,<<>>, <<"x00002aaab2c7d7f8 Return addr 0x00002aaaaca8ad98 (ns_janitor_map_recoverer:'-read_existing">>, <<"y(0) 30000">>, <<"y(1) list_vbuckets_prevstate">>, <<"y(2) {'ns_memcached-default','ns_1@10.1.2.30'}">>, <<"y(3) Catch 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<>>, <<"x00002aaab2c7d820 Return addr 0x00002aaaaca89898 (ns_janitor_map_recoverer:read_existing_m">>, <<"y(0) \"default\"">>,<<"y(1) []">>, <<"y(2) 'ns_1@10.1.2.30'">>,<<>>, <<"0x00002aaab2c7d840 Return addr 0x00002aaaaf68a048 (ns_janitor:do_cleanup/3 + 640)">>, <<"y(0) []">>,<<"y(1) []">>,<<"y(2) 1">>, <<"y(3) 256">>,<<"y(4) \"default\"">>,<<>>, <<"0x00002aaab2c7d870 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"(2) [{sasl_password,[]},{num_replicas,1},{replica_index,true},{ram_quota,1435500544},{">>, <<"y(3) ['ns_1@10.1.2.30']">>,<<"y(4) []">>, <<"y(5) \"default\"">>,<<>>, <<"0x00002aaab2c7d8a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.345.0>]}, {memory,601208}, {message_queue_len,0}, {reductions,7314}, {trap_exit,false}]}, {<0.25008.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac394841738 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4039">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002ac394841750 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002ac394841760 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,332089}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002ac394841790 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, 
{error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,306}, {trap_exit,false}]}, {<0.25065.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabc50f18 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4043">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaabc50f30 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabc50f40 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,495077}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaabc50f70 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,233}, {trap_exit,false}]}, {<0.25077.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948329e0 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4044">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002ac3948329f8 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002ac394832a08 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,528021}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002ac394832a38 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,220}, {trap_exit,false}]}, {<0.25089.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0671688 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4045">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaab06716a0 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaab06716b0 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,567025}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaab06716e0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, 
{min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,207}, {trap_exit,false}]}, {<0.25101.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae7fa0 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4046">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaacae7fb8 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacae7fc8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,597021}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaacae7ff8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,173}, {trap_exit,false}]}, {<0.25110.0>, [{registered_name,hot_keys_keeper}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafcd9d70 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) hot_keys_keeper">>, <<"y(3) {state,[],[],<0.25141.0>}">>, <<"y(4) hot_keys_keeper">>, <<"y(5) <0.366.0>">>,<<>>, <<"0x00002aaaafcd9da8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.366.0>,<0.25141.0>,<0.56.0>]}, {memory,2880}, {message_queue_len,0}, {reductions,57}, {trap_exit,false}]}, {<0.25117.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbf2768 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4047">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaabbf2780 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabbf2790 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,634065}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaabbf27c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,160}, {trap_exit,false}]}, {<0.25127.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, 
[<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafce1bd8 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4048">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaafce1bf0 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaafce1c00 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,733069}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaafce1c30 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,147}, {trap_exit,false}]}, {<0.25139.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac39483deb0 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4049">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002ac39483dec8 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002ac39483ded8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,770022}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002ac39483df08 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,134}, {trap_exit,false}]}, {<0.25141.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002ac39476b390 (gen:do_call/4 + 576)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0582360 Return addr 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<"y(0) #Ref<0.0.2.80986>">>, <<"y(1) 'ns_1@10.1.2.30'">>,<<"y(2) []">>, <<"y(3) 30000">>,<<"y(4) topkeys">>, <<"y(5) '$gen_call'">>, <<"y(6) <0.24861.0>">>,<<>>, <<"x00002aaab05823a0 Return addr 0x00002aaab00724b8 (hot_keys_keeper:grab_bucket_topkeys/1 + ">>, <<"y(0) 30000">>,<<"y(1) topkeys">>, <<"y(2) 'ns_memcached-default'">>, <<"y(3) Catch 0x00002ac3947ee0f8 (gen_server:call/3 + 128)">>, <<>>, <<"x00002aaab05823c8 Return addr 0x00002aaab0072e70 (hot_keys_keeper:'-keys_updater_body/0-lc">>, <<>>, <<"x00002aaab05823d0 Return addr 0x00002aaab0072888 (hot_keys_keeper:keys_updater_body/0 + 72">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) \"default\"">>,<<>>, <<"0x00002aaab05823f0 Return addr 0x0000000000875c98 ()">>, <<"y(0) []">>,<<"y(1) []">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,28657}, {total_heap_size,28657}, {links,[<0.25110.0>]}, {memory,230160}, {message_queue_len,0}, {reductions,2305}, 
{trap_exit,false}]}, {<0.25155.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbf1b90 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4050">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaabbf1ba8 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabbf1bb8 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,804066}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaabbf1be8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,100}, {trap_exit,false}]}, {<0.25165.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab066ca38 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4040">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaab066ca50 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaab066ca60 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,343021}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaab066ca90 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,87}, {trap_exit,false}]}, {<0.25167.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaaca814f8 (diag_handler:'-arm_timeout/2-fun-0-'/3 + 56)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002ac3948904d8 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.3688.0>">>, <<"y(1) #Fun">>, <<"y(2) 23000">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.3688.0>]}, {memory,2696}, {message_queue_len,0}, {reductions,2}, {trap_exit,false}]}, {<0.25168.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaaca814f8 (diag_handler:'-arm_timeout/2-fun-0-'/3 + 56)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab0672580 Return addr 0x0000000000875c98 ()">>, <<"y(0) <0.24585.0>">>, <<"y(1) #Fun">>, <<"y(2) 23000">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, 
{links,[<0.24585.0>]}, {memory,2696}, {message_queue_len,0}, {reductions,2}, {trap_exit,false}]}, {<0.25169.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaafd167f0 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4051">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaafd16808 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaafd16818 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,852021}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaafd16848 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,87}, {trap_exit,false}]}, {<0.25181.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbfad38 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4041">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaabbfad50 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabbfad60 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,376022}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaabbfad90 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,74}, {trap_exit,false}]}, {<0.25183.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaacae5268 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4052">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaacae5280 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaacae5290 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,890063}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaacae52c0 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,74}, 
{trap_exit,false}]}, {<0.25189.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002aaaaca5bf48 (mochiweb_http:request/2 + 88)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaab3df6878 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>, <<"y(3) #Fun">>, <<"y(4) #Port<0.24712>">>,<<>>, <<"0x00002aaab3df68a8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,75025}, {total_heap_size,75025}, {links,[<0.368.0>,#Port<0.24712>]}, {memory,601176}, {message_queue_len,0}, {reductions,18698}, {trap_exit,false}]}, {<0.25192.0>, [{registered_name,menelaus_web_alerts_srv}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac3947ef798 (gen_server:loop/6 + 264)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabca8c80 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) []">>,<<"y(1) infinity">>, <<"y(2) menelaus_web_alerts_srv">>, <<"(3) {state,[],[],{dict,0,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[">>, <<"y(4) menelaus_web_alerts_srv">>, <<"y(5) <0.366.0>">>,<<>>, <<"0x00002aaaabca8cb8 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[<0.366.0>,<0.56.0>]}, {memory,2840}, {message_queue_len,0}, {reductions,43}, {trap_exit,false}]}, {<0.25193.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabca8508 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4042">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaabca8520 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabca8530 Return addr 0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,429239}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaabca8560 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,61}, {trap_exit,false}]}, {<0.25195.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{proc_lib,init_p,5}}, {backtrace, [<<"Program counter: 0x00002ac394724c20 (prim_inet:accept0/2 + 184)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"0x00002aaaabbfb910 Return addr 0x00002aaaac9f3578 (inet_tcp:accept/2 + 40)">>, <<"y(0) 4053">>,<<"y(1) #Port<0.6828>">>, <<>>, <<"0x00002aaaabbfb928 Return addr 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(0) []">>,<<>>, <<"0x00002aaaabbfb938 Return addr 
0x00002ac394788050 (proc_lib:init_p_do_apply/3 + 56)">>, <<"y(0) {1334,107825,943197}">>, <<"y(1) Catch 0x00002aaaaca52130 (mochiweb_acceptor:init/3 + 168)">>, <<"y(2) #Fun">>, <<"y(3) #Port<0.6828>">>, <<"y(4) <0.368.0>">>,<<>>, <<"0x00002aaaabbfb968 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002ac394788070 (proc_lib:init_p_do_apply/3 + 88)">>, <<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,377}, {total_heap_size,377}, {links,[<0.368.0>]}, {memory,4024}, {message_queue_len,0}, {reductions,61}, {trap_exit,false}]}, {<0.25197.0>, [{registered_name,[]}, {status,waiting}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002ac3947f0678 (gen_server:rec_nodes/7 + 224)">>, <<"CP: 0x0000000000000000 (invalid)">>, <<"arity = 0">>,<<>>, <<"x00002aaaabbf7850 Return addr 0x00002ac3947f60a8 (gen_server:'-do_multi_call/4-fun-0-'/6 +">>, <<"y(0) #Ref<0.0.2.82019>">>,<<"y(1) 2000">>, <<"y(2) []">>,<<"y(3) []">>, <<"y(4) rex">>,<<"y(5) #Ref<0.0.2.82015>">>, <<"y(6) []">>,<<"y(7) #Ref<0.0.2.82018>">>, <<"y(8) 'ns_1@10.1.2.30'">>,<<>>, <<"0x00002aaaabbf78a0 Return addr 0x0000000000875c98 ()">>, <<"y(0) #Ref<0.0.2.82015>">>,<<"y(1) []">>, <<"y(2) []">>,<<"y(3) []">>, <<"y(4) []">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,233}, {total_heap_size,233}, {links,[]}, {memory,2872}, {message_queue_len,0}, {reductions,13}, {trap_exit,true}]}, {<0.25198.0>, [{registered_name,[]}, {status,running}, {initial_call,{erlang,apply,2}}, {backtrace, [<<"Program counter: 0x00002aaaaad930f8 (unknown function)">>, <<"CP: 0x00002aaaaca7f848 (diag_handler:grab_process_info/1 + 56)">>, <<>>, <<"x00002aaab21641c8 Return addr 0x00002aaaaca81ca0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) []">>,<<>>, <<"x00002aaab21641d8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) []">>,<<"y(1) <0.25198.0>">>, <<"y(2) Catch 0x00002aaaaca81ca0 (diag_handler:'-do_diag_per_node/0-lc$^0/1-0-'/1 + 104)">>, <<>>, <<"x00002aaab21641f8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25197.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164210 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25195.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164228 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25193.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164240 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25192.0>">>, <<"(1) [{registered_name,menelaus_web_alerts_srv},{status,waiting},{initial_call,{proc_li">>, <<>>, <<"x00002aaab2164258 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25189.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164270 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25183.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164288 Return 
addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25181.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21642a0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25169.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21642b8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25168.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab21642d0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25167.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab21642e8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25165.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164300 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25155.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164318 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25141.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164330 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25139.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164348 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25127.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164360 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25117.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164378 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25110.0>">>, <<"(1) [{registered_name,hot_keys_keeper},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2164390 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25101.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21643a8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25089.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21643c0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25077.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21643d8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25065.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21643f0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25008.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164408 Return 
addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.24894.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164420 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.24861.0>">>, <<"(1) [{registered_name,'ns_memcached-default'},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab2164438 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.24699.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164450 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.24630.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164468 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.24618.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164480 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.24585.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164498 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23236.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab21644b0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23235.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21644c8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23234.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21644e0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23233.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21644f8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23232.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164510 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23231.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164528 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23230.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164540 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23229.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164558 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23228.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164570 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23211.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164588 Return 
addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23210.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21645a0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23209.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab21645b8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23208.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21645d0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23207.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21645e8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23206.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164600 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23203.0>">>, <<"(1) [{registered_name,'ns_memcached_sup-default'},{status,waiting},{initial_call,{proc">>, <<>>, <<"x00002aaab2164618 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23202.0>">>, <<"(1) [{registered_name,'single_bucket_sup-default'},{status,waiting},{initial_call,{pro">>, <<>>, <<"x00002aaab2164630 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23201.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164648 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23026.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164660 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23025.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164678 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.22443.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164690 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.13097.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21646a8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.13096.0>">>, <<"(1) [{registered_name,ns_doctor},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab21646c0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12997.0>">>, <<"(1) [{registered_name,mnesia_late_loader},{status,waiting},{initial_call,{proc_lib,ini">>, <<>>, <<"x00002aaab21646d8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12996.0>">>, <<"(1) [{registered_name,mnesia_controller},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab21646f0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12995.0>">>, <<"(1) [{registered_name,mnesia_snmp_sup},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2164708 Return 
addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12994.0>">>, <<"(1) [{registered_name,mnesia_checkpoint_sup},{status,waiting},{initial_call,{proc_lib,">>, <<>>, <<"x00002aaab2164720 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12992.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164738 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12925.0>">>, <<"(1) [{registered_name,mnesia_tm},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab2164750 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12924.0>">>, <<"(1) [{registered_name,mnesia_recover},{status,waiting},{initial_call,{proc_lib,init_p,">>, <<>>, <<"x00002aaab2164768 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12923.0>">>, <<"(1) [{registered_name,mnesia_locker},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2164780 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12922.0>">>, <<"(1) [{registered_name,mnesia_subscr},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2164798 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12921.0>">>, <<"(1) [{registered_name,mnesia_monitor},{status,waiting},{initial_call,{proc_lib,init_p,">>, <<>>, <<"x00002aaab21647b0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12920.0>">>, <<"(1) [{registered_name,mnesia_kernel_sup},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab21647c8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12919.0>">>, <<"(1) [{registered_name,mnesia_event},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab21647e0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12918.0>">>, <<"(1) [{registered_name,mnesia_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab21647f8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12917.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2164810 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12916.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164828 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12885.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164840 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12883.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{net_kernel,ticker,2}},{backt">>, <<>>, <<"x00002aaab2164858 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12880.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{inet_tcp_dist,accept_loop,2}">>, <<>>, <<"x00002aaab2164870 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12869.0>">>, <<"(1) [{registered_name,xdc_rdoc_replication_srv},{status,waiting},{initial_call,{proc_l">>, <<>>, <<"x00002aaab2164888 Return 
addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12865.0>">>, <<"(1) [{registered_name,net_kernel},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab21648a0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12863.0>">>, <<"(1) [{registered_name,auth},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backt">>, <<>>, <<"x00002aaab21648b8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12862.0>">>, <<"(1) [{registered_name,erl_epmd},{status,waiting},{initial_call,{proc_lib,init_p,5}},{b">>, <<>>, <<"x00002aaab21648d0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12857.0>">>, <<"(1) [{registered_name,net_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab21648e8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.10179.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164900 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.10178.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164918 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.10177.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164930 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.10176.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164948 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.10175.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164960 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.10174.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164978 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.6957.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164990 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.6956.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21649a8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.6955.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab21649c0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.6954.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21649d8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.6953.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21649f0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.6952.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164a08 Return addr 
0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3775.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164a20 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3774.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164a38 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3773.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164a50 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3772.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164a68 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3771.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164a80 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3770.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164a98 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3688.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164ab0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3685.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164ac8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.569.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164ae0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.568.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164af8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.566.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164b10 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.565.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164b28 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.564.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164b40 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.563.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_connection,init,1}},{back">>, <<>>, <<"x00002aaab2164b58 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.475.0>">>, <<"(1) [{registered_name,inet_gethost_native},{status,waiting},{initial_call,{inet_gethos">>, <<>>, <<"x00002aaab2164b70 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.474.0>">>, <<"(1) [{registered_name,inet_gethost_native_sup},{status,waiting},{initial_call,{proc_li">>, <<>>, <<"x00002aaab2164b88 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.430.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,3}},{backtra">>, <<>>, <<"x00002aaab2164ba0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.429.0>">>, <<"(1) [{registered_name,couchbase_compaction_daemon},{status,waiting},{initial_call,{pro">>, <<>>, <<"x00002aaab2164bb8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.426.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164bd0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.423.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164be8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.422.0>">>, <<"(1) [{registered_name,ns_moxi_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}}">>, <<>>, <<"x00002aaab2164c00 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.421.0>">>, <<"(1) [{registered_name,ns_moxi_sup_work_queue},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab2164c18 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.420.0>">>, <<"(1) [{registered_name,'stats_reader-@system'},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab2164c30 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.419.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164c48 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.418.0>">>, <<"(1) [{registered_name,'stats_archiver-@system'},{status,waiting},{initial_call,{proc_l">>, <<>>, <<"x00002aaab2164c60 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.417.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164c78 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.416.0>">>, <<"(1) [{registered_name,system_stats_collector},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab2164c90 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.415.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164ca8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.414.0>">>, <<"(1) [{registered_name,ns_bucket_sup},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2164cc0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.413.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164cd8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.412.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164cf0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.411.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164d08 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.410.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164d20 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.409.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164d38 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.408.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164d50 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.407.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164d68 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.406.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164d80 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.405.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164d98 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.404.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164db0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.401.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164dc8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.400.0>">>, <<"(1) [{registered_name,xdc_rep_manager},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2164de0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.399.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164df8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.398.0>">>, <<"(1) [{registered_name,ns_bucket_worker},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab2164e10 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.397.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164e28 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.396.0>">>, <<"(1) [{registered_name,ns_port_memcached},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2164e40 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.394.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164e58 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.391.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164e70 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.390.0>">>, <<"(1) [{registered_name,ns_port_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}}">>, <<>>, <<"x00002aaab2164e88 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.387.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164ea0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.368.0>">>, <<"(1) [{registered_name,menelaus_web},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2164eb8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.366.0>">>, <<"(1) [{registered_name,menelaus_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2164ed0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.362.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164ee8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.361.0>">>, <<"(1) [{registered_name,ns_heart},{status,waiting},{initial_call,{proc_lib,init_p,5}},{b">>, <<>>, <<"x00002aaab2164f00 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.360.0>">>, <<"(1) [{registered_name,ns_stats_event},{status,waiting},{initial_call,{proc_lib,init_p,">>, <<>>, <<"x00002aaab2164f18 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.359.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2164f30 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.358.0>">>, <<"(1) [{registered_name,ns_mail},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab2164f48 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.357.0>">>, <<"(1) [{registered_name,ns_mail_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}}">>, <<>>, <<"x00002aaab2164f60 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.356.0>">>, <<"(1) [{registered_name,buckets_events},{status,waiting},{initial_call,{proc_lib,init_p,">>, <<>>, <<"x00002aaab2164f78 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.355.0>">>, <<"(1) [{registered_name,master_activity_events_srv},{status,waiting},{initial_call,{proc">>, <<>>, <<"x00002aaab2164f90 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.354.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164fa8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.353.0>">>, <<"(1) [{registered_name,master_activity_events_keeper},{status,waiting},{initial_call,{p">>, <<>>, <<"x00002aaab2164fc0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.352.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2164fd8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.351.0>">>, <<"(1) [{registered_name,master_activity_events_ingress},{status,waiting},{initial_call,{">>, <<>>, <<"x00002aaab2164ff0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.350.0>">>, <<"(1) [{registered_name,master_activity_events},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab2165008 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.349.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165020 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.348.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165038 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.347.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165050 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.346.0>">>, <<"(1) [{registered_name,cb_replication},{status,waiting},{initial_call,{proc_lib,init_p,">>, <<>>, <<"x00002aaab2165068 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.345.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165080 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.344.0>">>, <<"(1) [{registered_name,mb_master_sup},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165098 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.343.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21650b0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.342.0>">>, <<"(1) [{registered_name,mb_master},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab21650c8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.339.0>">>, <<"(1) [{registered_name,ns_tick_event},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab21650e0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.338.0>">>, <<"(1) [{registered_name,ns_config_rep_merger},{status,waiting},{initial_call,{erlang,app">>, <<>>, <<"x00002aaab21650f8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.333.0>">>, <<"(1) [{registered_name,ns_config_rep},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165110 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.332.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2165128 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.331.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2165140 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.327.0>">>, <<"(1) [{registered_name,ns_node_disco},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165158 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.326.0>">>, <<"(1) [{registered_name,ns_node_disco_events},{status,waiting},{initial_call,{proc_lib,i">>, <<>>, <<"x00002aaab2165170 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.325.0>">>, <<"(1) [{registered_name,ns_node_disco_sup},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165188 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.324.0>">>, <<"(1) [{registered_name,ns_log_events},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab21651a0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.323.0>">>, <<"(1) [{registered_name,ns_log},{status,waiting},{initial_call,{proc_lib,init_p,5}},{bac">>, <<>>, <<"x00002aaab21651b8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.322.0>">>, <<"(1) [{registered_name,ns_server_sup},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab21651d0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.318.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21651e8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.317.0>">>, <<"(1) [{registered_name,cb_config_couch_sync},{status,waiting},{initial_call,{proc_lib,i">>, <<>>, <<"x00002aaab2165200 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.316.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165218 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.315.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2165230 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.313.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165248 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.312.0>">>, <<"(1) [{registered_name,ns_config_isasl_sync},{status,waiting},{initial_call,{proc_lib,i">>, <<>>, <<"x00002aaab2165260 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.311.0>">>, <<"(1) [{registered_name,ns_config_remote},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab2165278 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.308.0>">>, <<"(1) [{registered_name,ns_config},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab2165290 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.307.0>">>, <<"(1) [{registered_name,ns_config_events},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab21652a8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.306.0>">>, <<"(1) [{registered_name,ns_config_sup},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab21652c0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.281.0>">>, <<"(1) [{registered_name,dets},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backt">>, <<>>, <<"x00002aaab21652d8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.280.0>">>, <<"(1) [{registered_name,dets_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{b">>, <<>>, <<"x00002aaab21652f0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.258.0>">>, <<"(1) [{registered_name,mb_mnesia},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab2165308 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.257.0>">>, <<"(1) [{registered_name,mb_mnesia_events},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab2165320 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.256.0>">>, <<"(1) [{registered_name,mb_mnesia_sup},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165338 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.255.0>">>, <<"(1) [{registered_name,ns_cluster},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab2165350 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.254.0>">>, <<"(1) [{registered_name,ns_cookie_manager},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165368 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.247.0>">>, <<"(1) [{registered_name,dist_manager},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2165380 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.246.0>">>, <<"(1) [{registered_name,timeout_diag_logger},{status,waiting},{initial_call,{proc_lib,in">>, <<>>, <<"x00002aaab2165398 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.245.0>">>, <<"(1) [{registered_name,couch_log},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab21653b0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.243.0>">>, <<"(1) [{registered_name,couch_uuids},{status,waiting},{initial_call,{proc_lib,init_p,5}}">>, <<>>, <<"x00002aaab21653c8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.242.0>">>, <<"(1) [{registered_name,couch_external_manager},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab21653e0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.241.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21653f8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.240.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165410 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.239.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165428 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.238.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165440 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.237.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165458 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.236.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165470 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.235.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165488 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.234.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21654a0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.233.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21654b8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.232.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21654d0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.231.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21654e8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.230.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165500 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.229.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165518 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.228.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165530 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.227.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165548 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.226.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165560 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.225.0>">>, <<"(1) [{registered_name,couch_httpd},{status,waiting},{initial_call,{proc_lib,init_p,5}}">>, <<>>, <<"x00002aaab2165578 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.224.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165590 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.223.0>">>, <<"(1) [{registered_name,couch_view},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab21655a8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.221.0>">>, <<"(1) [{registered_name,couch_query_servers},{status,waiting},{initial_call,{proc_lib,in">>, <<>>, <<"x00002aaab21655c0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.220.0>">>, <<"(1) [{registered_name,couch_index_merger_connection_pool},{status,waiting},{initial_ca">>, <<>>, <<"x00002aaab21655d8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.219.0>">>, <<"(1) [{registered_name,couch_spatial},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab21655f0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.218.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165608 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.217.0>">>, <<"(1) [{registered_name,couch_set_view},{status,waiting},{initial_call,{proc_lib,init_p,">>, <<>>, <<"x00002aaab2165620 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.216.0>">>, <<"(1) [{registered_name,couch_httpd_vhost},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165638 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.215.0>">>, <<"(1) [{registered_name,mc_conn_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}}">>, <<>>, <<"x00002aaab2165650 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.214.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{mc_tcp_listener,init,1}},{ba">>, <<>>, <<"x00002aaab2165668 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.213.0>">>, <<"(1) [{registered_name,mc_couch_events},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2165680 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.212.0>">>, <<"(1) [{registered_name,mc_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{bac">>, <<>>, <<"x00002aaab2165698 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.211.0>">>, <<"(1) [{registered_name,couch_os_daemons},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab21656b0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.210.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21656c8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.207.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21656e0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.205.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21656f8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.204.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165710 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.203.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165728 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.202.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165740 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.201.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2165758 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.200.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165770 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.197.0>">>, <<"(1) [{registered_name,couch_auth_cache},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab2165788 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.196.0>">>, <<"(1) [{registered_name,couch_db_update_notifier_sup},{status,waiting},{initial_call,{pr">>, <<>>, <<"x00002aaab21657a0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.195.0>">>, <<"(1) [{registered_name,couch_secondary_services},{status,waiting},{initial_call,{proc_l">>, <<>>, <<"x00002aaab21657b8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.193.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21657d0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.192.0>">>, <<"(1) [{registered_name,couch_access_log},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab21657e8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.191.0>">>, <<"(1) [{registered_name,couch_replica_index_barrier},{status,waiting},{initial_call,{pro">>, <<>>, <<"x00002aaab2165800 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.190.0>">>, <<"(1) [{registered_name,couch_main_index_barrier},{status,waiting},{initial_call,{proc_l">>, <<>>, <<"x00002aaab2165818 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.186.0>">>, <<"(1) [{registered_name,couch_rep_sup},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165830 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.185.0>">>, <<"(1) [{registered_name,couch_replication},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165848 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.184.0>">>, <<"(1) [{registered_name,couch_db_update},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2165860 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.183.0>">>, <<"(1) [{registered_name,couch_compress_types},{status,waiting},{initial_call,{proc_lib,i">>, <<>>, <<"x00002aaab2165878 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.182.0>">>, <<"(1) [{registered_name,couch_server},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2165890 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.181.0>">>, <<"(1) [{registered_name,couch_file_write_guard},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab21658a8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.180.0>">>, <<"(1) [{registered_name,couch_task_status},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab21658c0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.179.0>">>, <<"(1) [{registered_name,couch_task_events},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab21658d8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.178.0>">>, <<"(1) [{registered_name,couch_drv},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab21658f0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.177.0>">>, <<"(1) [{registered_name,couch_primary_services},{status,waiting},{initial_call,{proc_lib">>, <<>>, <<"x00002aaab2165908 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.176.0>">>, <<"(1) [{registered_name,couch_server_sup},{status,waiting},{initial_call,{proc_lib,init_">>, <<>>, <<"x00002aaab2165920 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.175.0>">>, <<"(1) [{registered_name,couch_config},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2165938 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.172.0>">>, <<"(1) [{registered_name,mochiweb_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2165950 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.171.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165968 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.170.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165980 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.168.0>">>, <<"(1) [{registered_name,lhttpc_manager},{status,waiting},{initial_call,{proc_lib,init_p,">>, <<>>, <<"x00002aaab2165998 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.167.0>">>, <<"(1) [{registered_name,lhttpc_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab21659b0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.166.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab21659c8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.165.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21659e0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.163.0>">>, <<"(1) [{registered_name,ssl_connection_sup},{status,waiting},{initial_call,{proc_lib,ini">>, <<>>, <<"x00002aaab21659f8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.162.0>">>, <<"(1) [{registered_name,ssl_manager},{status,waiting},{initial_call,{proc_lib,init_p,5}}">>, <<>>, <<"x00002aaab2165a10 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.161.0>">>, <<"(1) [{registered_name,ssl_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab2165a28 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.160.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165a40 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.159.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165a58 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.156.0>">>, <<"(1) [{registered_name,tftp_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{b">>, <<>>, <<"x00002aaab2165a70 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.155.0>">>, <<"(1) [{registered_name,httpd_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab2165a88 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.154.0>">>, <<"(1) [{registered_name,httpc_handler_sup},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165aa0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.153.0>">>, <<"(1) [{registered_name,httpc_manager},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165ab8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.152.0>">>, <<"(1) [{registered_name,httpc_profile_sup},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165ad0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.151.0>">>, <<"(1) [{registered_name,httpc_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab2165ae8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.150.0>">>, <<"(1) [{registered_name,ftp_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab2165b00 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.149.0>">>, <<"(1) [{registered_name,inets_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{">>, <<>>, <<"x00002aaab2165b18 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.148.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165b30 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.147.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165b48 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.144.0>">>, <<"(1) [{registered_name,crypto_server},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165b60 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.143.0>">>, <<"(1) [{registered_name,crypto_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab2165b78 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.142.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165b90 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.141.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165ba8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.139.0>">>, <<"(1) [{registered_name,cb_couch_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2165bc0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.138.0>">>, <<"(1) [{registered_name,ns_server_cluster_sup},{status,waiting},{initial_call,{proc_lib,">>, <<>>, <<"x00002aaab2165bd8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.78.0>">>, <<"(1) [{registered_name,'sink-ns_log'},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165bf0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.76.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165c08 Return addr 0x00002aaaaca81cf0 
(diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.75.0>">>, <<"(1) [{registered_name,'sink-disk_debug'},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165c20 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.73.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165c38 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.72.0>">>, <<"(1) [{registered_name,'sink-disk_couchdb'},{status,waiting},{initial_call,{proc_lib,in">>, <<>>, <<"x00002aaab2165c50 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.70.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165c68 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.69.0>">>, <<"(1) [{registered_name,'sink-disk_views'},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165c80 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.67.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165c98 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.66.0>">>, <<"(1) [{registered_name,'sink-disk_error'},{status,waiting},{initial_call,{proc_lib,init">>, <<>>, <<"x00002aaab2165cb0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.64.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165cc8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.63.0>">>, <<"(1) [{registered_name,disk_log_server},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2165ce0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.62.0>">>, <<"(1) [{registered_name,disk_log_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2165cf8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.61.0>">>, <<"(1) [{registered_name,'sink-disk_default'},{status,waiting},{initial_call,{proc_lib,in">>, <<>>, <<"x00002aaab2165d10 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.59.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165d28 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.58.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165d40 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.56.0>">>, <<"(1) [{registered_name,timer_server},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2165d58 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.55.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2165d70 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.54.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2165d88 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, 
<<"y(0) <0.53.0>">>, <<"(1) [{registered_name,cpu_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab2165da0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.52.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2165db8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.51.0>">>, <<"(1) [{registered_name,memsup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{bac">>, <<>>, <<"x00002aaab2165dd0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.50.0>">>, <<"(1) [{registered_name,disksup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab2165de8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.49.0>">>, <<"(1) [{registered_name,os_mon_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab2165e00 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.48.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165e18 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.47.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165e30 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.44.0>">>, <<"(1) [{registered_name,release_handler},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2165e48 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.43.0>">>, <<"(1) [{registered_name,overload},{status,waiting},{initial_call,{proc_lib,init_p,5}},{b">>, <<>>, <<"x00002aaab2165e60 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.42.0>">>, <<"(1) [{registered_name,alarm_handler},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165e78 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.41.0>">>, <<"(1) [{registered_name,sasl_safe_sup},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2165e90 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.40.0>">>, <<"(1) [{registered_name,sasl_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{b">>, <<>>, <<"x00002aaab2165ea8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.39.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165ec0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.38.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165ed8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.34.0>">>, <<"(1) [{registered_name,ale},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtr">>, <<>>, <<"x00002aaab2165ef0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.33.0>">>, <<"(1) [{registered_name,ale_dynamic_sup},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2165f08 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.32.0>">>, <<"(1) 
[{registered_name,ale_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab2165f20 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.31.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab2165f38 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.30.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165f50 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.25.0>">>, <<"(1) [{registered_name,kernel_safe_sup},{status,waiting},{initial_call,{proc_lib,init_p">>, <<>>, <<"x00002aaab2165f68 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.24.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165f80 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.23.0>">>, <<"(1) [{registered_name,user},{status,waiting},{initial_call,{erlang,apply,2}},{backtrac">>, <<>>, <<"x00002aaab2165f98 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.22.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab2165fb0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.21.0>">>, <<"(1) [{registered_name,standard_error},{status,waiting},{initial_call,{erlang,apply,2}}">>, <<>>, <<"x00002aaab2165fc8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.20.0>">>, <<"(1) [{registered_name,standard_error_sup},{status,waiting},{initial_call,{proc_lib,ini">>, <<>>, <<"x00002aaab2165fe0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.19.0>">>, <<"(1) [{registered_name,code_server},{status,waiting},{initial_call,{erlang,apply,2}},{b">>, <<>>, <<"x00002aaab2165ff8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.18.0>">>, <<"(1) [{registered_name,file_server_2},{status,waiting},{initial_call,{proc_lib,init_p,5">>, <<>>, <<"x00002aaab2166010 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.17.0>">>, <<"(1) [{registered_name,global_group},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2166028 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.16.0>">>, <<"(1) [{registered_name,inet_db},{status,waiting},{initial_call,{proc_lib,init_p,5}},{ba">>, <<>>, <<"x00002aaab2166040 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.15.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2166058 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.14.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{erlang,apply,2}},{backtrace,">>, <<>>, <<"x00002aaab2166070 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.13.0>">>, <<"(1) [{registered_name,global_name_server},{status,waiting},{initial_call,{proc_lib,ini">>, <<>>, <<"x00002aaab2166088 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.12.0>">>, <<"(1) 
[{registered_name,rex},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtr">>, <<>>, <<"x00002aaab21660a0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.11.0>">>, <<"(1) [{registered_name,kernel_sup},{status,waiting},{initial_call,{proc_lib,init_p,5}},">>, <<>>, <<"x00002aaab21660b8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.10.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{application_master,start_it,">>, <<>>, <<"x00002aaab21660d0 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.9.0>">>, <<"(1) [{registered_name,[]},{status,waiting},{initial_call,{proc_lib,init_p,5}},{backtra">>, <<>>, <<"x00002aaab21660e8 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.7.0>">>, <<"(1) [{registered_name,application_controller},{status,waiting},{initial_call,{erlang,a">>, <<>>, <<"x00002aaab2166100 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.6.0>">>, <<"(1) [{registered_name,error_logger},{status,waiting},{initial_call,{proc_lib,init_p,5}">>, <<>>, <<"x00002aaab2166118 Return addr 0x00002aaaaca81cf0 (diag_handler:'-do_diag_per_node/0-lc$^0/">>, <<"y(0) <0.3.0>">>, <<"(1) [{registered_name,erl_prim_loader},{status,waiting},{initial_call,{erlang,apply,2}">>, <<>>, <<"0x00002aaab2166130 Return addr 0x00002aaaaca7fa38 (diag_handler:do_diag_per_node/0 + 216)">>, <<"y(0) <0.0.0>">>, <<"(1) [{registered_name,init},{status,waiting},{initial_call,{otp_ring0,start,2}},{backt">>, <<>>, <<"x00002aaab2166148 Return addr 0x00002aaaab83e170 (rpc:'-handle_call_call/6-fun-0-'/5 + 192">>, <<"y(0) []">>,<<"y(1) []">>, <<"y(2) []">>, <<"(3) [{version,[{public_key,\"0.14\"},{lhttpc,\"1.3.0\"},{ale,\"8cffe61\"},{os_mon,\"2.2.8\"},{">>, <<"(4) [{buckets,[{'_vclock',[{'ns_1@10.1.2.30',{266,63501326980}},{'ns_1@127.0.0.1',{15,">>, <<"(5) [\"bucket_engine 2.0.0r-1065-rel Linux-x86_64\",\"couchbase-examples 2.0.0r-1065-rel ">>, <<"(6) [{public_key,\"0.14\"},{lhttpc,\"1.3.0\"},{ale,\"8cffe61\"},{os_mon,\"2.2.8\"},{couch_set_">>, <<>>, <<"0x00002aaab2166188 Return addr 0x0000000000875c98 ()">>, <<"y(0) Catch 0x00002aaaab83e170 (rpc:'-handle_call_call/6-fun-0-'/5 + 192)">>, <<"y(1) []">>,<<"y(2) []">>, <<"y(3) []">>,<<"y(4) <0.12.0>">>,<<>>]}, {error_handler,error_handler}, {garbage_collection, [{min_bin_vheap_size,46368}, {min_heap_size,233}, {fullsweep_after,0}, {minor_gcs,0}]}, {heap_size,121393}, {total_heap_size,121393}, {links,[]}, {memory,972008}, {message_queue_len,0}, {reductions,125436}, {trap_exit,false}]}]}, {memory,{4040077312,3460702208,{<0.12.0>,4114768}}}, {disk, [{"/",55007284,15}, {"/boot",101086,21}, {"/dev/shm",1972692,0}]}, {active_tasks,[]}]}] nodes_info = [{struct, [{systemStats, {struct, [{cpu_utilization_rate,100.0}, {swap_total,6140452864}, {swap_used,102400}]}}, {interestingStats,{struct,[]}}, {uptime,<<"673">>}, {memoryTotal,4040077312}, {memoryFree,579375104}, {mcdMemoryReserved,3082}, {mcdMemoryAllocated,3082}, {couchApiBase,<<"http://10.1.2.30:8092/">>}, {otpNode,<<"ns_1@10.1.2.30">>}, {otpCookie,<<"olcyvmepmlevmwcj">>}, {clusterMembership,<<"active">>}, {status,<<"warmup">>}, {thisNode,true}, {hostname,<<"10.1.2.30:8091">>}, {clusterCompatibility,1}, {version,<<"2.0.0r-1065-rel-enterprise">>}, {os,<<"x86_64-unknown-linux-gnu">>}, {ports,{struct,[{proxy,11211},{direct,11210}]}}]}] buckets = [{"default", [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, 
{ram_quota,1435500544}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@10.1.2.30']}]}]

logs:
-------------------------------
logs_node (log):
-------------------------------
[error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,crypto_sup} started: [{pid,<0.144.0>}, {name,crypto_server}, {mfargs,{crypto_server,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: crypto started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: public_key started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.150.0>}, {name,ftp_sup}, {mfargs,{ftp_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_profile_sup} started: [{pid,<0.153.0>}, {name,httpc_manager}, {mfargs, {httpc_manager,start_link, [default,only_session_cookies,inets]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_sup} started: [{pid,<0.152.0>}, {name,httpc_profile_sup}, {mfargs, {httpc_profile_sup,start_link, [[{httpc,{default,only_session_cookies}}]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_sup} started: [{pid,<0.154.0>}, {name,httpc_handler_sup}, {mfargs,{httpc_handler_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.151.0>}, {name,httpc_sup}, {mfargs, {httpc_sup,start_link, [[{httpc,{default,only_session_cookies}}]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.155.0>}, {name,httpd_sup}, {mfargs,{httpd_sup,start_link,[[]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.156.0>},
{name,tftp_sup}, {mfargs,{tftp_sup,start_link,[[]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: inets started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: oauth started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ssl_sup} started: [{pid,<0.162.0>}, {name,ssl_manager}, {mfargs,{ssl_manager,start_link,[[]]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ssl_sup} started: [{pid,<0.163.0>}, {name,ssl_connection}, {mfargs,{ssl_connection_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: ssl started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,lhttpc_sup} started: [{pid,<0.168.0>}, {name,lhttpc_manager}, {mfargs, {lhttpc_manager,start_link, [[{name,lhttpc_manager}]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: lhttpc started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: mochiweb started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: couch_set_view started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: mapreduce started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.175.0>}, {name,couch_config}, {mfargs, {couch_server_sup,couch_config_start_link_wrapper, [["/opt/couchbase/etc/couchdb/default.ini", "/opt/couchbase/etc/couchdb/default.d/capi.ini", "/opt/couchbase/etc/couchdb/default.d/geocouch.ini", "/opt/couchbase/etc/couchdb/local.ini", "/opt/couchbase/etc/couchdb/local.d/mccouch.ini"], <0.175.0>]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.178.0>}, {name,collation_driver}, {mfargs,{couch_drv,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.179.0>}, {name,couch_task_events}, {mfargs, {gen_event,start_link,[{local,couch_task_events}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.180.0>}, {name,couch_task_status}, {mfargs,{couch_task_status,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.181.0>}, {name,couch_file_write_guard}, {mfargs,{couch_file_write_guard,sup_start_link,[]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.182.0>}, {name,couch_server}, {mfargs,{couch_server,sup_start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.183.0>}, {name,couch_compress_types}, {mfargs,{couch_compress_types,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.184.0>}, {name,couch_db_update_event}, {mfargs, {gen_event,start_link,[{local,couch_db_update}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.185.0>}, {name,couch_replication_event}, {mfargs, {gen_event,start_link,[{local,couch_replication}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.186.0>}, {name,couch_replication_supervisor}, {mfargs,{couch_rep_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.187.0>}, {name,couch_log}, {mfargs,{couch_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.190.0>}, {name,couch_main_index_barrier}, {mfargs, {couch_index_barrier,start_link, [couch_main_index_barrier, "max_parallel_indexers"]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.191.0>}, {name,couch_replica_index_barrier}, {mfargs, {couch_index_barrier,start_link, [couch_replica_index_barrier, "max_parallel_replica_indexers"]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.192.0>}, {name,couch_access_log}, {mfargs,{couch_access_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.177.0>}, {name,couch_primary_services}, {mfargs,{couch_primary_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:11] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.196.0>}, {name,couch_db_update_notifier_sup}, {mfargs,{couch_db_update_notifier_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.197.0>}, {name,auth_cache}, {mfargs,{couch_auth_cache,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.211.0>}, {name,os_daemons}, {mfargs,{couch_os_daemons,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.213.0>}, {name,mc_couch_events}, {mfargs, {gen_event,start_link,[{local,mc_couch_events}]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] 
[nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.214.0>}, {name,mc_tcp_listener}, {mfargs,{mc_tcp_listener,start_link,[11213]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.215.0>}, {name,mc_conn_sup}, {mfargs,{mc_conn_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.212.0>}, {name,mc_daemon}, {mfargs,{mc_sup,start_link,[11213]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [couchdb:info] [2012-04-10 18:19:12] [nonode@nohost:<0.214.0>:couch_log:info:39] mccouch is listening on port 11213 [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.216.0>}, {name,vhosts}, {mfargs,{couch_httpd_vhost,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.217.0>}, {name,set_view_manager}, {mfargs,{couch_set_view,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.219.0>}, {name,spatial_manager}, {mfargs,{couch_spatial,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.220.0>}, {name,index_merger_pool}, {mfargs, {lhttpc_manager,start_link, [[{connection_timeout,90000}, {pool_size,10000}, {name,couch_index_merger_connection_pool}]]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.221.0>}, {name,query_servers}, {mfargs,{couch_query_servers,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.223.0>}, {name,view_manager}, {mfargs,{couch_view,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, 
{child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.225.0>}, {name,httpd}, {mfargs,{couch_httpd,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [couchdb:info] [2012-04-10 18:19:12] [nonode@nohost:cb_couch_sup:couch_log:info:39] Apache CouchDB has started on http://0.0.0.0:8092/ [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.242.0>}, {name,external_manager}, {mfargs,{couch_external_manager,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.243.0>}, {name,uuids}, {mfargs,{couch_uuids,start,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.195.0>}, {name,couch_secondary_services}, {mfargs,{couch_secondary_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,cb_couch_sup} started: [{pid,<0.176.0>}, {name,couch_app}, {mfargs, {couch_app,start, [fake, ["/opt/couchbase/etc/couchdb/default.ini", "/opt/couchbase/etc/couchdb/local.ini"]]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_cluster_sup} started: [{pid,<0.139.0>}, {name,cb_couch_sup}, {mfargs,{cb_couch_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,supervisor}] [error_logger:error] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,couch_primary_services} Context: child_terminated Reason: normal Offender: [{pid,<0.187.0>}, {name,couch_log}, {mfargs,{couch_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.245.0>}, {name,couch_log}, {mfargs,{couch_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [ns_server:info] [2012-04-10 18:19:12] [nonode@nohost:ns_server_cluster_sup:log_os_info:start_link:25] OS type: {unix,linux} Version: {2,6,18} Runtime info: [{otp_release,"R15B"}, {erl_version,"5.9"}, {erl_version_long, "Erlang R15B (erts-5.9) [source] [64-bit] [smp:4:4] 
[async-threads:16] [hipe] [kernel-poll:true]\n"}, {system_arch_raw,"x86_64-unknown-linux-gnu"}, {system_arch,"x86_64-unknown-linux-gnu"}, {localtime,{{2012,4,10},{18,19,12}}}, {memory, [{total,24148992}, {processes,4097637}, {processes_used,4096678}, {system,20051355}, {atom,347633}, {atom_used,344011}, {binary,41336}, {code,8366581}, {ets,694968}]}, {loaded, [ns_info,log_os_info,couch_config_writer,cb_init_loggers, couch_external_manager,mochiweb_acceptor,mochiweb_socket, mochiweb_socket_server,mochilists,mochiweb_http,eval_bits, couch_view,couch_query_servers,couch_spatial, couch_set_view,couch_httpd,inet_tcp,couch_httpd_vhost, gen_tcp,mc_conn_sup,mc_tcp_listener,mc_sup, couch_os_daemons,snappy,couch_compress,ejson,couch_doc, couch_db_update_notifier,couch_btree,couch_ref_counter, crypto,couch_uuids,couch_db_updater,couch_db, couch_auth_cache,couch_db_update_notifier_sup, couch_secondary_sup,couch_access_log,couch_index_barrier, couch_event_sup,couch_log,couch_rep_sup, couch_compress_types,httpd_util,filelib,couch_file, couch_file_write_guard,couch_task_status,erl_ddll, couch_drv,couch_primary_sup,couch_server,string,re, couch_util,couch_config,couch_server_sup,mochiweb_sup, mochiweb_app,ssl,lhttpc_manager,lhttpc_sup,lhttpc, ssl_connection_sup,ssl_session_cache,ssl_certificate_db, ssl_manager,ssl_sup,ssl_app,tftp_sup,httpd_sup, httpc_handler_sup,httpc_cookie,inets,httpc_manager,httpc, httpc_profile_sup,httpc_sup,ftp_sup,inets_sup,inets_app, ale_default_formatter,crypto_server,crypto_sup,crypto_app, couch_app,cb_couch_sup,ns_server_cluster_sup,otp_internal, misc,'ale_logger-views','ale_logger-cluster', 'ale_logger-rebalance','ale_logger-stats', 'ale_logger-ns_doctor','ale_logger-menelaus', 'ale_logger-user','ale_logger-ns_server', 'ale_logger-couchdb',ns_log_sink,disk_log_sup, disk_log_server,disk_log_1,disk_log,ale_disk_sink, ns_server,timer,io_lib_fread,cpu_sup,memsup,disksup, os_mon,io,sasl_report,release_handler,calendar,overload, alarm_handler,log_mf_h,sasl_report_tty_h,sasl, ale_error_logger_handler,'ale_logger-ale_logger', 'ale_logger-error_logger',beam_opcodes,beam_dict,beam_asm, beam_validator,beam_flatten,beam_trim,beam_receive, beam_bsm,beam_peep,beam_dead,beam_split,beam_type, beam_bool,beam_clean,beam_utils,beam_jump,beam_block, v3_codegen,v3_life,v3_kernel,sys_core_dsetel,erl_bifs, sys_core_fold,cerl_trees,sys_core_inline,core_lib,cerl, v3_core,erl_bits,erl_expand_records,sys_pre_expand,sofs, erl_internal,sets,ordsets,erl_lint,compile, dynamic_compile,ale_utils,io_lib_pretty,io_lib_format, io_lib,ale_codegen,dict,ale,ale_dynamic_sup,ale_sup, ale_app,ns_bootstrap,file_io_server,orddict,erl_eval,c, error_logger_tty_h,queue,kernel_config,user,user_sup, supervisor_bridge,standard_error,ram_file,file,beam_lib, code_server,unicode,packages,hipe_unified_loader,gb_sets, ets,binary,code,file_server,net_kernel,global_group, erl_distribution,filename,os,inet_parse,inet,inet_udp, inet_config,inet_db,global,gb_trees,rpc,supervisor,kernel, application_master,sys,application,gen_server,erl_parse, proplists,erl_scan,lists,application_controller,proc_lib, gen,gen_event,error_logger,heart,error_handler,erlang, erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,init, otp_ring0]}, {applications, [{public_key,"Public key infrastructure","0.14"}, {lhttpc,"Lightweight HTTP Client","1.3.0"}, {ale,"Another Logger for Erlang","8cffe61"}, {os_mon,"CPO CXC 138 46","2.2.8"}, {couch_set_view,"Set views","1.2.0a-051c820-git"}, {inets,"INETS CXC 138 49","5.8"}, {couch,"Apache CouchDB","1.2.0a-051c820-git"}, 
{mapreduce,"MapReduce using V8 JavaScript engine","1.0.0"}, {kernel,"ERTS CXC 138 10","2.15"}, {crypto,"CRYPTO version 2","2.1"}, {ssl,"Erlang/OTP SSL application","5.0"}, {sasl,"SASL CXC 138 11","2.2"}, {ns_server,"Couchbase server", "2.0.0r-1065-rel-enterprise"}, {mochiweb,"MochiMedia Web Server","1.4.1"}, {oauth,"Erlang OAuth implementation","7d85d3ef"}, {stdlib,"ERTS CXC 138 10","1.18"}]}, {pre_loaded, [erlang,erl_prim_loader,prim_zip,zlib,prim_file,prim_inet, init,otp_ring0]}, {process_count,148}, {node,nonode@nohost}, {nodes,[]}, {registered, [couch_drv,mc_sup,couch_server,lhttpc_manager, release_handler,overload,couch_external_manager, couch_compress_types,erl_prim_loader,tftp_sup, alarm_handler,'sink-ns_log',error_logger, 'sink-disk_debug','sink-disk_couchdb',couch_auth_cache, 'sink-disk_views',couch_replica_index_barrier, 'sink-disk_error',couch_main_index_barrier, couch_replication,couch_task_status,couch_task_events, couch_access_log,mc_couch_events,mc_conn_sup,user, standard_error,httpd_sup,ssl_connection_sup, couch_os_daemons,ssl_manager,couch_view,couch_log, timer_server,couch_httpd_vhost,couch_rep_sup,cb_couch_sup, couch_config,inet_db,ssl_sup,couch_server_sup, couch_file_write_guard,init,couch_uuids,rex, ns_server_cluster_sup,couch_db_update_notifier_sup, kernel_sup,httpc_sup,global_group,global_name_server, httpc_profile_sup,file_server_2,httpc_manager, httpc_handler_sup,ftp_sup,sasl_safe_sup,lhttpc_sup, couch_set_view,inets_sup,disk_log_sup,crypto_server, disk_log_server,crypto_sup,code_server, 'sink-disk_default',mochiweb_sup,os_mon_sup, application_controller,ale_sup,couch_secondary_services, couch_primary_services,standard_error_sup,couch_db_update, cpu_sup,ale_dynamic_sup,memsup,disksup,sasl_sup, couch_query_servers,couch_index_merger_connection_pool, kernel_safe_sup,ale,couch_spatial,couch_httpd]}, {cookie,nocookie}, {wordsize,8}, {wall_clock,2}] [ns_server:info] [2012-04-10 18:19:12] [nonode@nohost:ns_server_cluster_sup:log_os_info:start_link:27] Manifest: ["bucket_engine 2.0.0r-1065-rel Linux-x86_64", "couchbase-examples 2.0.0r-1065-rel Linux-x86_64", "couchbase-python-client 2.0.0r-1065-rel Linux-x86_64", "couchbase-server 2.0.0r-1065-rel Linux-x86_64", "couchdb 2.0.0r-1065-rel Linux-x86_64", "couchdbx-app 2.0.0r-1065-rel Linux-x86_64", "couchstore 2.0.0r-1065-rel Linux-x86_64", "ep-engine 2.0.0r-1065-rel Linux-x86_64", "geocouch 2.0.0r-1065-rel Linux-x86_64","icu4c 2.0.0r-1065-rel Linux-x86_64", "libconflate 2.0.0r-1065-rel Linux-x86_64", "libcouchbase 2.0.0r-1065-rel Linux-x86_64", "libmemcached 2.0.0r-1065-rel Linux-x86_64", "libvbucket 2.0.0r-1065-rel Linux-x86_64", "manifest 2.0.0r-1065-rel Linux-x86_64", "manifest-master 2.0.0r-1065-rel Linux-x86_64", "mccouch 2.0.0r-1065-rel Linux-x86_64", "membase-cli 2.0.0r-1065-rel Linux-x86_64", "memcached 2.0.0r-1065-rel Linux-x86_64", "memcachetest 2.0.0r-1065-rel Linux-x86_64", "moxi 2.0.0r-1065-rel Linux-x86_64","ns_server 2.0.0r-1065-rel Linux-x86_64", "otp 2.0.0r-1065-rel Linux-x86_64","portsigar 2.0.0r-1065-rel Linux-x86_64", "sigar 2.0.0r-1065-rel Linux-x86_64","snappy 2.0.0r-1065-rel Linux-x86_64", "testrunner 2.0.0r-1065-rel Linux-x86_64","tlm 2.0.0r-1065-rel Linux-x86_64", "v8 2.0.0r-1065-rel Linux-x86_64", "workload-generator 2.0.0r-1065-rel Linux-x86_64"] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,crypto_sup} started: [{pid,<0.143.0>}, {name,crypto_server}, 
{mfargs,{crypto_server,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: crypto started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: public_key started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:12] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.149.0>}, {name,ftp_sup}, {mfargs,{ftp_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_profile_sup} started: [{pid,<0.152.0>}, {name,httpc_manager}, {mfargs, {httpc_manager,start_link, [default,only_session_cookies,inets]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_sup} started: [{pid,<0.151.0>}, {name,httpc_profile_sup}, {mfargs, {httpc_profile_sup,start_link, [[{httpc,{default,only_session_cookies}}]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,httpc_sup} started: [{pid,<0.153.0>}, {name,httpc_handler_sup}, {mfargs,{httpc_handler_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.150.0>}, {name,httpc_sup}, {mfargs, {httpc_sup,start_link, [[{httpc,{default,only_session_cookies}}]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.154.0>}, {name,httpd_sup}, {mfargs,{httpd_sup,start_link,[[]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,inets_sup} started: [{pid,<0.155.0>}, {name,tftp_sup}, {mfargs,{tftp_sup,start_link,[[]]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: inets started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= application: oauth started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ssl_sup} started: [{pid,<0.161.0>}, {name,ssl_manager}, {mfargs,{ssl_manager,start_link,[[]]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ssl_sup} started: [{pid,<0.162.0>}, {name,ssl_connection}, {mfargs,{ssl_connection_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,4000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: ssl started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,lhttpc_sup} started: [{pid,<0.167.0>}, {name,lhttpc_manager}, {mfargs, {lhttpc_manager,start_link, [[{name,lhttpc_manager}]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: lhttpc started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: mochiweb started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: couch_set_view started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: mapreduce started_at: nonode@nohost [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.174.0>}, {name,couch_config}, {mfargs, {couch_server_sup,couch_config_start_link_wrapper, [["/opt/couchbase/etc/couchdb/default.ini", "/opt/couchbase/etc/couchdb/default.d/capi.ini", "/opt/couchbase/etc/couchdb/default.d/geocouch.ini", "/opt/couchbase/etc/couchdb/local.ini", "/opt/couchbase/etc/couchdb/local.d/mccouch.ini"], <0.174.0>]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.177.0>}, {name,collation_driver}, {mfargs,{couch_drv,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,couch_primary_services} started: [{pid,<0.178.0>}, {name,couch_task_events}, {mfargs, {gen_event,start_link,[{local,couch_task_events}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.179.0>}, {name,couch_task_status}, {mfargs,{couch_task_status,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.180.0>}, {name,couch_file_write_guard}, {mfargs,{couch_file_write_guard,sup_start_link,[]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.181.0>}, {name,couch_server}, {mfargs,{couch_server,sup_start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.182.0>}, {name,couch_compress_types}, {mfargs,{couch_compress_types,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.183.0>}, {name,couch_db_update_event}, {mfargs, {gen_event,start_link,[{local,couch_db_update}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.184.0>}, {name,couch_replication_event}, {mfargs, {gen_event,start_link,[{local,couch_replication}]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.185.0>}, {name,couch_replication_supervisor}, {mfargs,{couch_rep_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.186.0>}, {name,couch_log}, {mfargs,{couch_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: 
{local,couch_primary_services} started: [{pid,<0.188.0>}, {name,couch_main_index_barrier}, {mfargs, {couch_index_barrier,start_link, [couch_main_index_barrier, "max_parallel_indexers"]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.189.0>}, {name,couch_replica_index_barrier}, {mfargs, {couch_index_barrier,start_link, [couch_replica_index_barrier, "max_parallel_replica_indexers"]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_primary_services} started: [{pid,<0.191.0>}, {name,couch_access_log}, {mfargs,{couch_access_log,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_server_sup} started: [{pid,<0.176.0>}, {name,couch_primary_services}, {mfargs,{couch_primary_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.195.0>}, {name,couch_db_update_notifier_sup}, {mfargs,{couch_db_update_notifier_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.196.0>}, {name,auth_cache}, {mfargs,{couch_auth_cache,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,couch_secondary_services} started: [{pid,<0.206.0>}, {name,os_daemons}, {mfargs,{couch_os_daemons,start_link,[]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.208.0>}, {name,mc_couch_events}, {mfargs, {gen_event,start_link,[{local,mc_couch_events}]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mc_sup} started: [{pid,<0.209.0>}, {name,mc_tcp_listener}, {mfargs,{mc_tcp_listener,start_link,[11213]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
[error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,mc_sup}
  started: [{pid,<0.210.0>}, {name,mc_conn_sup}, {mfargs,{mc_conn_sup,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,supervisor}]

[error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,couch_secondary_services}
  started: [{pid,<0.207.0>}, {name,mc_daemon}, {mfargs,{mc_sup,start_link,[11213]}}, {restart_type,permanent}, {shutdown,brutal_kill}, {child_type,worker}]

[error_logger:error] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,mc_sup}
  Context:    child_terminated
  Reason:     {{badmatch,{error,eaddrinuse}},
               [{mc_tcp_listener,init,1, [{file,"src/mc_tcp_listener.erl"},{line,18}]}]}
  Offender:   [{pid,<0.209.0>}, {name,mc_tcp_listener}, {mfargs,{mc_tcp_listener,start_link,[11213]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}]

[error_logger:info] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,mc_sup}
  started: [{pid,<0.212.0>}, {name,mc_tcp_listener}, {mfargs,{mc_tcp_listener,start_link,[11213]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}]

[error_logger:error] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_msg:76] Error in process <0.209.0> with exit value: {{badmatch,{error,eaddrinuse}},[{mc_tcp_listener,init,1,[{file,"src/mc_tcp_listener.erl"},{line,18}]}]}

[error_logger:error] [2012-04-10 18:19:13] [nonode@nohost:error_logger:ale_error_logger_handler:log_msg:76] Error in process <0.212.0> with exit value: {{badmatch,{error,eaddrinuse}},[{mc_tcp_listener,init,1,[{file,"src/mc_tcp_listener.erl"},{line,18}]}]}

[... the same crash/restart cycle repeats within this one second: mc_sup logs an identical SUPERVISOR REPORT (Context child_terminated, same Reason) and immediately restarts mc_tcp_listener as <0.213.0>, <0.214.0>, ... <0.230.0>, each new listener exiting with the same {{badmatch,{error,eaddrinuse}},...} raised at src/mc_tcp_listener.erl line 18; the capture breaks off mid-record during the <0.230.0> restart ...]
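The storm above has a single cause: TCP port 11213 is already bound when each restarted mc_tcp_listener runs its init/1, so the listener dies at src/mc_tcp_listener.erl line 18 and the permanent restart type makes mc_sup retry at once. What follows is a minimal, hedged sketch of how exactly that exit term is produced; the module below is invented for illustration and is not the real mc_tcp_listener source:

    -module(eaddrinuse_sketch).
    -export([demo/0]).

    %% Hedged illustration, NOT src/mc_tcp_listener.erl: gen_tcp:listen/2 on a
    %% port that is already bound returns {error,eaddrinuse}; a strict {ok,_}
    %% pattern match on that result exits the process with
    %% {{badmatch,{error,eaddrinuse}}, StackTrace} -- the term seen in every
    %% SUPERVISOR REPORT above.
    demo() ->
        Port = 11213,
        {ok, _Held} = gen_tcp:listen(Port, [binary]),  %% first bind wins the port
        {ok, _Sock} = gen_tcp:listen(Port, [binary]).  %% second bind: badmatch on {error,eaddrinuse}

On a live node the usual follow-up is to find which OS process still owns 11213; a leftover instance of the same service is the common culprit.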
[error_logger:info] [2012-04-10 18:19:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,inet_gethost_native_sup}
  started: [{pid,<0.475.0>},{mfa,{inet_gethost_native,init,[[]]}}]

[error_logger:info] [2012-04-10 18:19:15] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,kernel_safe_sup}
  started: [{pid,<0.474.0>}, {name,inet_gethost_native_sup}, {mfargs,{inet_gethost_native,start_link,[]}}, {restart_type,temporary}, {shutdown,1000}, {child_type,worker}]

[ns_server:info] [2012-04-10 18:19:16] [ns_1@127.0.0.1:ns_config_events:ns_port_sup:terminate_port:129] unsupervising port:
  {moxi,"/opt/couchbase/bin/moxi",
   ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
    "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming",
    "-p","0","-Y","y","-O","stderr",[]],
   [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR",[]}, {"MOXI_SASL_PLAIN_PWD",[]}]},
    use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]}

[ns_server:info] [2012-04-10 18:19:16] [ns_1@127.0.0.1:<0.393.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0

[ns_server:info] [2012-04-10 18:19:16] [ns_1@127.0.0.1:<0.393.0>:ns_port_server:log:166] moxi<0.393.0>: EOL on stdin.
Exiting

[ns_server:info] [2012-04-10 18:19:16] [ns_1@127.0.0.1:ns_config_events:ns_port_sup:launch_port:74] supervising port:
  {moxi,"/opt/couchbase/bin/moxi",
   ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
    "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming",
    "-p","0","-Y","y","-O","stderr",[]],
   [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]},
    use_stdio,exit_status,port_server_send_eol,stderr_to_stdout,stream]}

[error_logger:info] [2012-04-10 18:19:16] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,ns_port_sup}
  started: [{pid,<0.480.0>},
            {name,{moxi,"/opt/couchbase/bin/moxi", [...same moxi command line and environment as in the launch_port entry above...]}},
            {mfargs,{supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", [...same moxi command line and environment as in the launch_port entry above...]]]}},
            {restart_type,permanent}, {shutdown,10000}, {child_type,worker}]

[ns_server:info] [2012-04-10 18:19:17] [ns_1@127.0.0.1:<0.481.0>:ns_port_server:log:166]
moxi<0.481.0>: 2012-04-10 18:19:16: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13)
moxi<0.481.0>: 2012-04-10 18:19:16: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)

[menelaus:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:<0.395.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase

[ns_server:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@127.0.0.1']

[error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,ns_bucket_sup}
  started: [{pid,<0.555.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}]

[stats:error] [2012-04-10 18:19:32] [ns_1@127.0.0.1:<0.490.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']

[ns_server:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:713] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=1435500544;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;alog_path=/opt/couchbase/var/lib/couchdb/default/access.log;vb0=false;waitforwarmup=false;failpartialwarmup=false;"
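The ensure_bucket entry above packs the entire ep-engine configuration into one key=value;-separated string (note couch_port=11213, the same port the mc_tcp_listener loop earlier could not bind). Below is a small hedged helper for splitting such a string while reading a diag like this one; it is illustrative only and not part of ns_server:

    -module(engine_config_sketch).
    -export([parse/1]).

    %% Hedged helper: turn "ht_size=3079;ht_locks=5;...;failpartialwarmup=false;"
    %% into [{"ht_size","3079"}, {"ht_locks","5"}, ...]. string:tokens/2 drops
    %% empty tokens, so the trailing ";" is harmless.
    parse(ConfigString) ->
        [begin
             [Key, Value] = string:tokens(Pair, "="),
             {Key, Value}
         end || Pair <- string:tokens(ConfigString, ";")].

For example, parse("vb0=false;max_vbuckets=256;") returns [{"vb0","false"},{"max_vbuckets","256"}].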
string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=1435500544;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;alog_path=/opt/couchbase/var/lib/couchdb/default/access.log;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.561.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.562.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.567.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.570.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.571.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.572.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.574.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.576.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, 
[error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,'ns_memcached_sup-default'}
  started: [{pid,<0.577.0>}, {name,{failover_safeness_level,"default"}}, {mfargs,{failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}]

[error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,'single_bucket_sup-default'}
  started: [{pid,<0.560.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}]

[error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,'single_bucket_sup-default'}
  started: [{pid,<0.582.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}]

[couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found,no_db_file}
[... the same error is logged once per vbucket, vb 2 through vb 190, all within this second and all with reason {not_found,no_db_file} ...]
[couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon:
Error opening vb 192 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC 
daemon: Error opening vb 212 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] 
[ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 
18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found,no_db_file} [views:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.605.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:<0.481.0>:ns_port_server:log:166] moxi<0.481.0>: 2012-04-10 18:19:34: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.481.0>: "name": "default", moxi<0.481.0>: "nodeLocator": "vbucket", moxi<0.481.0>: "saslPassword": "", moxi<0.481.0>: "nodes": [{ moxi<0.481.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.481.0>: "replication": 0, moxi<0.481.0>: "clusterMembership": "active", moxi<0.481.0>: "status": "warmup", moxi<0.481.0>: "thisNode": true, moxi<0.481.0>: "hostname": "127.0.0.1:8091", moxi<0.481.0>: "clusterCompatibility": 1, moxi<0.481.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.481.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.481.0>: "ports": { moxi<0.481.0>: "proxy": 11211, moxi<0.481.0>: "direct": 11210 moxi<0.481.0>: } moxi<0.481.0>: }], moxi<0.481.0>: "vBucketServerMap": { moxi<0.481.0>: "hashAlgorithm": "CRC", moxi<0.481.0>: "numReplicas": 1, moxi<0.481.0>: "serverList": ["127.0.0.1:11210"], moxi<0.481.0>: "vBucketMap": [] moxi<0.481.0>: } moxi<0.481.0>: }) [ns_server:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: Connected to mccouch: "localhost:11213" memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: Connected to mccouch: "localhost:11213" memcached<0.396.0>: Warning: data directory does not exist, /opt/couchbase/var/lib/couchdb/default memcached<0.396.0>: Failed to load mutation log, falling back to key dump memcached<0.396.0>: Warning: data directory is empty, /opt/couchbase/var/lib/couchdb/default memcached<0.396.0>: metadata loaded in 206 usec memcached<0.396.0>: Warning: data directory is empty, /opt/couchbase/var/lib/couchdb/default memcached<0.396.0>: warmup completed in 350 usec memcached<0.396.0>: 
Extension support isn't implemented in this version of bucket_engine [ns_server:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.548.0>} [user:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:handle_info:312] Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds. [ns_server:info] [2012-04-10 18:19:32] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@127.0.0.1': ["default"] [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.481.0>:ns_port_server:log:166] moxi<0.481.0>: 2012-04-10 18:19:34: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.481.0>: "name": "default", moxi<0.481.0>: "nodeLocator": "vbucket", moxi<0.481.0>: "saslPassword": "", moxi<0.481.0>: "nodes": [{ moxi<0.481.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.481.0>: "replication": 0, moxi<0.481.0>: "clusterMembership": "active", moxi<0.481.0>: "status": "healthy", moxi<0.481.0>: "thisNode": true, moxi<0.481.0>: "hostname": "127.0.0.1:8091", moxi<0.481.0>: "clusterCompatibility": 1, moxi<0.481.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.481.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.481.0>: "ports": { moxi<0.481.0>: "proxy": 11211, moxi<0.481.0>: "direct": 11210 moxi<0.481.0>: } moxi<0.481.0>: }], moxi<0.481.0>: "vBucketServerMap": { moxi<0.481.0>: "hashAlgorithm": "CRC", moxi<0.481.0>: "numReplicas": 1, moxi<0.481.0>: "serverList": ["127.0.0.1:11210"], moxi<0.481.0>: "vBucketMap": [] moxi<0.481.0>: } moxi<0.481.0>: }) [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "default" on 'ns_1@127.0.0.1' from missing to active. [views:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]}, {passive,[]}, {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {replica,[]}] [views:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "default" on 'ns_1@127.0.0.1' from missing to active. 
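The couch_log run above is the first warmup of the "default" bucket: each vbucket database is opened before couchdb has created its file on disk (compare the memcached warnings that /opt/couchbase/var/lib/couchdb/default does not exist yet), so every open fails with {not_found,no_db_file}. Runs like this are easier to audit mechanically than by eye. Below is a minimal Python sketch that collapses repeated entries into message templates with counts; it assumes only the "[component:level] [timestamp] ..." entry shape visible in this capture, and the input file name diag.log is hypothetical.

    import re
    from collections import Counter

    # Matches the "[component:level] [timestamp] rest-of-entry" shape above.
    ENTRY = re.compile(r'^\[[\w:]+\] \[[^\]]+\] (?P<rest>.+)$')

    def condense(lines):
        """Count entries after masking digits, so lines that differ only in
        a vbucket number (or pid/port) collapse into one template."""
        counts = Counter()
        for line in lines:
            m = ENTRY.match(line.strip())
            if m:
                counts[re.sub(r'\d+', 'N', m.group('rest'))] += 1
        return counts

    if __name__ == '__main__':
        with open('diag.log') as f:          # hypothetical input file
            for template, n in condense(f).most_common():
                print(f'{n:6d}  {template[:120]}')

Against a capture like this one it would report the couch_log:error:42 "Error opening vb N" template and the do_sanify_chain:147 "Setting vbucket N" template once each, with their repeat counts, instead of hundreds of near-identical lines.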
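The moxi failure is self-describing: agent_config.c rejects the streamed bucket configuration because its "vBucketMap" is empty, and the map length must be a power of two, greater than zero, and at most MAX_BUCKETS. The check reduces to the usual single-bit test; here is a sketch of the constraint exactly as the message states it (the MAX_BUCKETS value below is an assumption for illustration, not moxi's compiled-in limit).

    # Constraint named in the moxi error: the number of buckets (vBucketMap
    # entries) must be a power of two > 0 and <= MAX_BUCKETS.
    MAX_BUCKETS = 65536  # assumed for illustration only

    def valid_vbucket_count(n: int) -> bool:
        # A power of two has exactly one bit set, so n & (n - 1) == 0.
        return 0 < n <= MAX_BUCKETS and n & (n - 1) == 0

    assert not valid_vbucket_count(0)    # the empty map streamed above
    assert valid_vbucket_count(256)      # this bucket's full 256-vbucket map

So the repeated error is presumably transient: once the janitor below has promoted all 256 vbuckets, the streamed map is non-empty and its length satisfies the check.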
[... the same ns_janitor:do_sanify_chain:147 entry, "Setting vbucket N in "default" on 'ns_1@127.0.0.1' from missing to active.", repeats once per vbucket at [ns_server:info] [2012-04-10 18:19:33] for N = 2 through 241 ...]
[ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "default" on 'ns_1@127.0.0.1' from missing to active. [ns_server:info] [2012-04-10 18:19:33] [ns_1@127.0.0.1:<0.548.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[views:info] [2012-04-10 18:19:37] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}]
[views:info] [2012-04-10 18:19:37] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: []
Replica: []
ReplicaCleanup: []
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.542.0>:couch_log:info:39] 10.1.2.49 - - PUT /default/_design/dev_test_view_on_100_docs-5c8b521 201
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] Calling couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, {set_view_params,256,[],[],true}])
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] couch_set_view spawned worker {<0.3278.0>,#Ref<0.0.0.28592>} to open set view group `_design/dev_test_view_on_100_docs-5c8b521`, set `default`, signature `ec9fc392e69d205495a444abcede2e4d`, new waiting list: [{<0.605.0>,#Ref<0.0.0.28591>}]
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3280.0>:couch_log:info:39] Started undefined main set view group `default`, group `_design/dev_test_view_on_100_docs-5c8b521`
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3278.0>:couch_log:info:39] couch_set_view opener worker <0.3278.0> for set view group `_design/dev_test_view_on_100_docs-5c8b521`, set `default`, signature `ec9fc392e69d205495a444abcede2e4d`, finishing with reply {ok, <0.3280.0>}
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3280.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_100_docs-5c8b521`, linked PID <0.3281.0> stopped normally
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] couch_set_view set view group `_design/dev_test_view_on_100_docs-5c8b521`, set `default`, signature `ec9fc392e69d205495a444abcede2e4d`, opener worker {#Ref<0.0.0.28592>,<0.3278.0>} finished. Replying with {ok,<0.3280.0>} to waiting list: [{<0.605.0>,#Ref<0.0.0.28591>}]
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3291.0>:couch_log:info:39] Started undefined replica set view group `default`, group `_design/dev_test_view_on_100_docs-5c8b521`
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3291.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_100_docs-5c8b521`, linked PID <0.3292.0> stopped normally
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3291.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_100_docs-5c8b521`, configured with:
256 partitions
no replica support
initial active partitions []
initial passive partitions []
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3280.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_100_docs-5c8b521`, configured with:
256 partitions
replica support
initial active partitions []
initial passive partitions []
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, {set_view_params,256,[],[],true}]) returned ok in 31ms
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:393] Applying map to bucket default (ddoc _design/dev_test_view_on_100_docs-5c8b521): [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}]
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:399] Classified vbuckets for "default" (ddoc _design/dev_test_view_on_100_docs-5c8b521):
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: []
Replica: []
ReplicaCleanup: []
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:418] Calling couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255], [],[]])
[couchdb:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:<0.3280.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_100_docs-5c8b521`, partition states updated
active partitions before: []
active partitions after: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
passive partitions before: []
passive partitions after: []
cleanup partitions before: []
cleanup partitions after: []
replica partitions before: []
replica partitions after: []
replicas on transfer before: []
replicas on transfer after: []
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:419] couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255], [],[]]) returned ok in 31ms
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:423] Calling couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, []])
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:424] couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, []]) returned ok in 0ms
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:425] Calling couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, []])
[views:info] [2012-04-10 18:19:43] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:426] couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_100_docs-5c8b521">>, []]) returned ok in 0ms
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3280.0>:couch_log:info:39] Starting updater for set view `default`, main group `_design/dev_test_view_on_100_docs-5c8b521`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.899.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_100_docs-5c8b521/_view/dev_test_view_on_100_docs-5c8b521?connection_timeout=60000&full_set=true&stale=update_after 200
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3319.0>:couch_log:info:39] Updater for set view `default`, main group `_design/dev_test_view_on_100_docs-5c8b521` started
Active partitions: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive partitions: []
Active partitions update seqs:
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,1},{11,0},{12,0},{13,1},{14,0},{15,0},{16,0},{17,1},{18,0},{19,0},{20,0},{21,0},{22,1},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,1},{35,0},{36,0},{37,1},{38,0},{39,0},{40,0},{41,0},{42,0},{43,1},{44,1},{45,0},{46,0},{47,0},{48,1},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,1},{56,0},{57,1},{58,0},{59,1},{60,1},{61,0},{62,1},{63,0},{64,0},{65,1},{66,0},{67,0},{68,0},{69,0},{70,1},{71,0},{72,1},{73,0},{74,0},{75,1},{76,1},{77,0},{78,0},{79,1},{80,1},{81,0},{82,0},{83,1},{84,1},{85,0},{86,0},{87,1},{88,0},{89,1},{90,1},{91,0},{92,0},{93,1},{94,1},{95,0},{96,0},{97,1},{98,0},{99,0},{100,0},{101,0},{102,1},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,1},{123,0},{124,0},{125,1},{126,0},{127,0},{128,0},{129,0},{130,2},{131,1},{132,1},{133,2},{134,0},{135,0},{136,0},{137,0},{138,1},{139,2},{140,2},{141,1},{142,0},{143,0},{144,1},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,1},{152,0},{153,1},{154,0},{155,0},{156,0},{157,0},{158,1},{159,0},{160,1},{161,1},{162,0},{163,0},{164,0},{165,0},{166,1},{167,1},{168,1},{169,1},{170,0},{171,0},{172,0},{173,0},{174,1},{175,1},{176,0},{177,0},{178,1},{179,1},{180,1},{181,1},{182,0},{183,0},{184,0},{185,0},{186,1},{187,1},{188,1},{189,1},{190,0},{191,0},{192,0},{193,0},{194,1},{195,1},{196,1},{197,1},{198,0},{199,0},{200,0},{201,0},{202,1},{203,1},{204,1},{205,1},{206,0},{207,0},{208,1},{209,1},{210,0},{211,0},{212,0},{213,0},{214,1},{215,1},{216,1},{217,1},{218,0},{219,0},{220,0},{221,0},{222,1},{223,1},{224,1},{225,2},{226,1},{227,0},{228,0},{229,1},{230,2},{231,1},{232,2},{233,1},{234,0},{235,0},{236,0},{237,0},{238,1},{239,2},{240,0},{241,0},{242,0},{243,1},{244,1},{245,0},{246,0},{247,0},{248,0},{249,1},{250,1},{251,0},{252,0},{253,1},{254,1},{255,0}] Active partitions indexed update seqs: 
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0},{19,0},{20,0},{21,0},{22,0},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,0},{35,0},{36,0},{37,0},{38,0},{39,0},{40,0},{41,0},{42,0},{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,0},{56,0},{57,0},{58,0},{59,0},{60,0},{61,0},{62,0},{63,0},{64,0},{65,0},{66,0},{67,0},{68,0},{69,0},{70,0},{71,0},{72,0},{73,0},{74,0},{75,0},{76,0},{77,0},{78,0},{79,0},{80,0},{81,0},{82,0},{83,0},{84,0},{85,0},{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0},{93,0},{94,0},{95,0},{96,0},{97,0},{98,0},{99,0},{100,0},{101,0},{102,0},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,0},{123,0},{124,0},{125,0},{126,0},{127,0},{128,0},{129,0},{130,0},{131,0},{132,0},{133,0},{134,0},{135,0},{136,0},{137,0},{138,0},{139,0},{140,0},{141,0},{142,0},{143,0},{144,0},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,0},{152,0},{153,0},{154,0},{155,0},{156,0},{157,0},{158,0},{159,0},{160,0},{161,0},{162,0},{163,0},{164,0},{165,0},{166,0},{167,0},{168,0},{169,0},{170,0},{171,0},{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0},{180,0},{181,0},{182,0},{183,0},{184,0},{185,0},{186,0},{187,0},{188,0},{189,0},{190,0},{191,0},{192,0},{193,0},{194,0},{195,0},{196,0},{197,0},{198,0},{199,0},{200,0},{201,0},{202,0},{203,0},{204,0},{205,0},{206,0},{207,0},{208,0},{209,0},{210,0},{211,0},{212,0},{213,0},{214,0},{215,0},{216,0},{217,0},{218,0},{219,0},{220,0},{221,0},{222,0},{223,0},{224,0},{225,0},{226,0},{227,0},{228,0},{229,0},{230,0},{231,0},{232,0},{233,0},{234,0},{235,0},{236,0},{237,0},{238,0},{239,0},{240,0},{241,0},{242,0},{243,0},{244,0},{245,0},{246,0},{247,0},{248,0},{249,0},{250,0},{251,0},{252,0},{253,0},{254,0},{255,0}] Passive partitions update seqs: [] Passive partitions indexed update seqs: [] Active partitions # docs: 
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,1},{11,0},{12,0},{13,1},{14,0},{15,0},{16,0},{17,1},{18,0},{19,0},{20,0},{21,0},{22,1},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,1},{35,0},{36,0},{37,1},{38,0},{39,0},{40,0},{41,0},{42,0},{43,1},{44,1},{45,0},{46,0},{47,0},{48,1},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,1},{56,0},{57,1},{58,0},{59,1},{60,1},{61,0},{62,1},{63,0},{64,0},{65,1},{66,0},{67,0},{68,0},{69,0},{70,1},{71,0},{72,1},{73,0},{74,0},{75,1},{76,1},{77,0},{78,0},{79,1},{80,1},{81,0},{82,0},{83,1},{84,1},{85,0},{86,0},{87,1},{88,0},{89,1},{90,1},{91,0},{92,0},{93,1},{94,1},{95,0},{96,0},{97,1},{98,0},{99,0},{100,0},{101,0},{102,1},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,1},{123,0},{124,0},{125,1},{126,0},{127,0},{128,0},{129,0},{130,2},{131,1},{132,1},{133,2},{134,0},{135,0},{136,0},{137,0},{138,1},{139,2},{140,2},{141,1},{142,0},{143,0},{144,1},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,1},{152,0},{153,1},{154,0},{155,0},{156,0},{157,0},{158,1},{159,0},{160,1},{161,1},{162,0},{163,0},{164,0},{165,0},{166,1},{167,1},{168,1},{169,1},{170,0},{171,0},{172,0},{173,0},{174,1},{175,1},{176,0},{177,0},{178,1},{179,1},{180,1},{181,1},{182,0},{183,0},{184,0},{185,0},{186,1},{187,1},{188,1},{189,1},{190,0},{191,0},{192,0},{193,0},{194,1},{195,1},{196,1},{197,1},{198,0},{199,0},{200,0},{201,0},{202,1},{203,1},{204,1},{205,1},{206,0},{207,0},{208,1},{209,1},{210,0},{211,0},{212,0},{213,0},{214,1},{215,1},{216,1},{217,1},{218,0},{219,0},{220,0},{221,0},{222,1},{223,1},{224,1},{225,2},{226,1},{227,0},{228,0},{229,1},{230,2},{231,1},{232,2},{233,1},{234,0},{235,0},{236,0},{237,0},{238,1},{239,2},{240,0},{241,0},{242,0},{243,1},{244,1},{245,0},{246,0},{247,0},{248,0},{249,1},{250,1},{251,0},{252,0},{253,1},{254,1},{255,0}] Active partitions # deleted docs: 
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0},{19,0},{20,0},{21,0},{22,0},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,0},{35,0},{36,0},{37,0},{38,0},{39,0},{40,0},{41,0},{42,0},{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,0},{56,0},{57,0},{58,0},{59,0},{60,0},{61,0},{62,0},{63,0},{64,0},{65,0},{66,0},{67,0},{68,0},{69,0},{70,0},{71,0},{72,0},{73,0},{74,0},{75,0},{76,0},{77,0},{78,0},{79,0},{80,0},{81,0},{82,0},{83,0},{84,0},{85,0},{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0},{93,0},{94,0},{95,0},{96,0},{97,0},{98,0},{99,0},{100,0},{101,0},{102,0},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,0},{123,0},{124,0},{125,0},{126,0},{127,0},{128,0},{129,0},{130,0},{131,0},{132,0},{133,0},{134,0},{135,0},{136,0},{137,0},{138,0},{139,0},{140,0},{141,0},{142,0},{143,0},{144,0},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,0},{152,0},{153,0},{154,0},{155,0},{156,0},{157,0},{158,0},{159,0},{160,0},{161,0},{162,0},{163,0},{164,0},{165,0},{166,0},{167,0},{168,0},{169,0},{170,0},{171,0},{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0},{180,0},{181,0},{182,0},{183,0},{184,0},{185,0},{186,0},{187,0},{188,0},{189,0},{190,0},{191,0},{192,0},{193,0},{194,0},{195,0},{196,0},{197,0},{198,0},{199,0},{200,0},{201,0},{202,0},{203,0},{204,0},{205,0},{206,0},{207,0},{208,0},{209,0},{210,0},{211,0},{212,0},{213,0},{214,0},{215,0},{216,0},{217,0},{218,0},{219,0},{220,0},{221,0},{222,0},{223,0},{224,0},{225,0},{226,0},{227,0},{228,0},{229,0},{230,0},{231,0},{232,0},{233,0},{234,0},{235,0},{236,0},{237,0},{238,0},{239,0},{240,0},{241,0},{242,0},{243,0},{244,0},{245,0},{246,0},{247,0},{248,0},{249,0},{250,0},{251,0},{252,0},{253,0},{254,0},{255,0}]
Passive partitions # docs: []
Passive partitions # deleted docs: []
Replicas to transfer: []
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/0 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/1 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/2 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/3 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/4 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/5 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/6 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/7 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/8 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/9 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/10 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/11 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/12 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/13 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/14 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/15 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/16 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/17 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/18 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/19 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/20 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/21 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/22 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/23 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/24 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/25 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/26 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/27 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/28 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/29 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/30 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/31 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/32 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/33 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/34 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/35 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/36 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/37 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/38 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/39 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/40 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/41 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/42 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/43 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/44 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/45 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/46 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/47 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/48 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/49 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/50 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/51 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/52 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/53 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/54 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/55 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/56 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/57 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/58 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/59 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/60 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/61 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/62 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/63 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/64 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/65 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/66 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/67 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/68 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/69 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/70 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/71 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/72 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/73 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/74 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/75 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/76 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/77 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/78 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/79 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/80 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/81 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/82 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/83 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/84 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/85 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/86 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/87 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/88 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/89 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/90 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/91 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/92 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/93 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/94 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/95 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/96 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/97 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/98 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/99 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/100 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/101 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/102 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/103 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/104 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/105 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/106 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/107 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/108 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/109 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/110 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/111 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/112 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/113 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/114 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/115 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/116 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/117 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/118 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/119 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/120 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/121 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/122 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/123 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/124 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/125 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/126 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/127 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/128 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/129 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/130 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/131 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/132 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/133 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/134 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/135 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default`
[couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/136 to update
main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/137 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/138 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/139 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/140 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/141 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/142 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/143 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/144 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/145 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/146 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/147 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/148 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/149 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/150 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] 
[ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/151 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/152 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/153 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/154 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/155 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/156 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/157 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/158 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/159 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/160 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/161 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/162 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/163 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/164 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/165 to update 
main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/166 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/167 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/168 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/169 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/170 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/171 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/172 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/173 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/174 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/175 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/176 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/177 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/178 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/179 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] 
[ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/180 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/181 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/182 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/183 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/184 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/185 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/186 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/187 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/188 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/189 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/190 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/191 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/192 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/193 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/194 to update 
main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/195 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/196 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/197 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/198 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/199 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/200 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/201 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/202 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/203 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/204 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/205 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/206 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/207 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/208 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] 
[ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/209 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/210 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/211 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/212 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/213 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/214 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/215 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/216 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/217 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/218 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/219 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/220 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/221 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/222 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/223 to update 
main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/224 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/225 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/226 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/227 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/228 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/229 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/230 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/231 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/232 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/233 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/234 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/235 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/236 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/237 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] 
[ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/238 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/239 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/240 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/241 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/242 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/243 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/244 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/245 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/246 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/247 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/248 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/249 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/250 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/251 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/252 to update 
main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/253 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/254 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3325.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/255 to update main set view group `_design/dev_test_view_on_100_docs-5c8b521` from set `default` [couchdb:info] [2012-04-10 18:19:48] [ns_1@127.0.0.1:<0.3280.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_100_docs-5c8b521`, updater finished Indexing time: 0.196 seconds Blocked time: 0.000 seconds Inserted IDs: 100 Deleted IDs: 0 Inserted KVs: 100 Deleted KVs: 0 Cleaned KVs: 0 [couchdb:info] [2012-04-10 18:19:58] [ns_1@127.0.0.1:<0.909.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_100_docs-5c8b521/_view/dev_test_view_on_100_docs-5c8b521?connection_timeout=60000&full_set=true&stale=update_after 200 [couchdb:info] [2012-04-10 18:20:12] [ns_1@127.0.0.1:<0.3316.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_100_docs-5c8b521 200 [couchdb:info] [2012-04-10 18:20:12] [ns_1@127.0.0.1:<0.3359.0>:couch_log:info:39] 10.1.2.49 - - DELETE /default/_design/dev_test_view_on_100_docs-5c8b521 200 [couchdb:info] [2012-04-10 18:20:12] [ns_1@127.0.0.1:<0.3280.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_100_docs-5c8b521`, terminating with reason: normal [couchdb:info] [2012-04-10 18:20:12] [ns_1@127.0.0.1:<0.3291.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_100_docs-5c8b521`, terminating with reason: shutdown [user:info] [2012-04-10 18:20:12] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:350] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok [couchdb:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] Deleting index files for set `default` because database partition `default/master` was deleted [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/0">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/1">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/10">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/100">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/101">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/102">>: ok [ns_server:info] [2012-04-10 
[... the same "Deleting database <<"default/N">>: ok" entry repeats once per vbucket database, named in lexicographic order (<<"default/1">>, <<"default/10">>, <<"default/100">> ... <<"default/109">>, <<"default/11">>, <<"default/110">> ...), continuing here through <<"default/183">>, all at 18:20:13 ...]
[ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: Shutting down tap connections!
[... the "Deleting database" entries resume with <<"default/184">> and continue in the same lexicographic order through <<"default/244">>, all at 18:20:13 ...]
[ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395]
Deleting database <<"default/245">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/246">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/247">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/248">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/249">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/25">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/250">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/251">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/252">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/253">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/254">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/255">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/26">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/27">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/28">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/29">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/3">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/30">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/31">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/32">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/33">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/34">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/35">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] 
Deleting database <<"default/36">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/37">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/38">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/39">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/4">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/40">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/41">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/42">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/43">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/44">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/45">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/46">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/47">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/48">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/49">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/5">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/50">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/51">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/52">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/53">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/54">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/55">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/56">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting 
database <<"default/57">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/58">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/59">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/6">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/60">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/61">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/62">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/63">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/64">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/65">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/66">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/67">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/68">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/69">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/7">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/70">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/71">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/72">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/73">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/74">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/75">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/76">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/77">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/78">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/79">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/8">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/80">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/81">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/82">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/83">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/84">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/85">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/86">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/87">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/88">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/89">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/9">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/90">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/91">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/92">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/93">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/94">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/95">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/96">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/97">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/98">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/99">>: ok [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:<0.345.0>:ns_orchestrator:idle:345] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:<0.3682.0>:ns_port_sup:restart_port:134] restarting port: {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]} [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:<0.481.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:<0.481.0>:ns_port_server:log:166] moxi<0.481.0>: EOL on stdin. Exiting [menelaus:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:<0.3368.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [error_logger:info] [2012-04-10 18:20:13] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.3683.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:20:14] [ns_1@127.0.0.1:<0.3684.0>:ns_port_server:log:166] moxi<0.3684.0>: 2012-04-10 18:20:13: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.3684.0>: 2012-04-10 18:20:13: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8) [menelaus:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:<0.3692.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [ns_server:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@127.0.0.1'] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] 
=========================PROGRESS REPORT========================= supervisor: {local,ns_bucket_sup} started: [{pid,<0.3760.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [stats:error] [2012-04-10 18:20:28] [ns_1@127.0.0.1:<0.3693.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1'] [ns_server:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:713] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=1435500544;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;alog_path=/opt/couchbase/var/lib/couchdb/default/access.log;vb0=false;waitforwarmup=false;failpartialwarmup=false;" [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3763.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3776.0>}, {name,{ns_memcached,data,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",data}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3777.0>}, {name,{ns_vbm_sup,"default"}}, {mfargs,{ns_vbm_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3778.0>}, {name,{ns_vbm_new_sup,"default"}}, {mfargs,{ns_vbm_new_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3779.0>}, {name,{couch_stats_reader,"default"}}, {mfargs,{couch_stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3780.0>}, {name,{stats_collector,"default"}}, {mfargs,{stats_collector,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS 
REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3782.0>}, {name,{stats_archiver,"default"}}, {mfargs,{stats_archiver,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3784.0>}, {name,{stats_reader,"default"}}, {mfargs,{stats_reader,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_memcached_sup-default'} started: [{pid,<0.3785.0>}, {name,{failover_safeness_level,"default"}}, {mfargs, {failover_safeness_level,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.3762.0>}, {name,{ns_memcached_sup,"default"}}, {mfargs,{ns_memcached_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.3786.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] 
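The ensure_bucket entry above hands ep-engine its settings as a single semicolon-separated key=value string; its max_vbuckets=256 also explains the run of {not_found,no_db_file} errors that follows, where capi_set_view_manager apparently probes each vbucket database of the freshly recreated bucket before ep-engine has written the files. A minimal sketch (not ns_server's own parser) for pulling such a config string apart:

def parse_engine_config(cfg):
    """Split 'k1=v1;k2=v2;...' into a dict, coercing integers and booleans."""
    out = {}
    for pair in filter(None, cfg.split(";")):  # drop the empty trailing piece
        key, _, value = pair.partition("=")
        if value.isdigit():
            out[key] = int(value)
        elif value in ("true", "false"):
            out[key] = value == "true"
        else:
            out[key] = value                   # paths and names stay strings
    return out

# The config string from the ensure_bucket entry above:
cfg = ("ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;"
       "max_size=1435500544;tap_keepalive=300;"
       "dbname=/opt/couchbase/var/lib/couchdb/default;"
       "allow_data_loss_during_shutdown=true;backend=couchdb;"
       "couch_bucket=default;couch_port=11213;max_vbuckets=256;"
       "alog_path=/opt/couchbase/var/lib/couchdb/default/access.log;"
       "vb0=false;waitforwarmup=false;failpartialwarmup=false;")
settings = parse_engine_config(cfg)
print(settings["max_vbuckets"], settings["max_size"])  # 256 1435500544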
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 146 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb
228 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error 
opening vb 248 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found,no_db_file} [views:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.3800.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:<0.3684.0>:ns_port_server:log:166] moxi<0.3684.0>: 2012-04-10 18:20:30: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.3684.0>: "name": "default", moxi<0.3684.0>: "nodeLocator": "vbucket", moxi<0.3684.0>: "saslPassword": "", moxi<0.3684.0>: "nodes": [{ moxi<0.3684.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.3684.0>: "replication": 0, moxi<0.3684.0>: "clusterMembership": "active", moxi<0.3684.0>: "status": "warmup", moxi<0.3684.0>: "thisNode": true, moxi<0.3684.0>: "hostname": "127.0.0.1:8091", moxi<0.3684.0>: "clusterCompatibility": 1, moxi<0.3684.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.3684.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.3684.0>: "ports": { moxi<0.3684.0>: "proxy": 11211, moxi<0.3684.0>: "direct": 11210 moxi<0.3684.0>: } moxi<0.3684.0>: }], moxi<0.3684.0>: "vBucketServerMap": { moxi<0.3684.0>: "hashAlgorithm": "CRC", moxi<0.3684.0>: "numReplicas": 1, moxi<0.3684.0>: "serverList": ["127.0.0.1:11210"], moxi<0.3684.0>: "vBucketMap": [] moxi<0.3684.0>: } moxi<0.3684.0>: }) [ns_server:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: 
Connected to mccouch: "localhost:11213" memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: Connected to mccouch: "localhost:11213" memcached<0.396.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.396.0>: Failed to load mutation log, falling back to key dump memcached<0.396.0>: metadata loaded in 287 usec memcached<0.396.0>: warmup completed in 492 usec [user:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:handle_info:312] Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds. [ns_server:info] [2012-04-10 18:20:28] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@127.0.0.1': ["default"] [ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3684.0>:ns_port_server:log:166] moxi<0.3684.0>: 2012-04-10 18:20:30: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.3684.0>: "name": "default", moxi<0.3684.0>: "nodeLocator": "vbucket", moxi<0.3684.0>: "saslPassword": "", moxi<0.3684.0>: "nodes": [{ moxi<0.3684.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.3684.0>: "replication": 0, moxi<0.3684.0>: "clusterMembership": "active", moxi<0.3684.0>: "status": "healthy", moxi<0.3684.0>: "thisNode": true, moxi<0.3684.0>: "hostname": "127.0.0.1:8091", moxi<0.3684.0>: "clusterCompatibility": 1, moxi<0.3684.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.3684.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.3684.0>: "ports": { moxi<0.3684.0>: "proxy": 11211, moxi<0.3684.0>: "direct": 11210 moxi<0.3684.0>: } moxi<0.3684.0>: }], moxi<0.3684.0>: "vBucketServerMap": { moxi<0.3684.0>: "hashAlgorithm": "CRC", moxi<0.3684.0>: "numReplicas": 1, moxi<0.3684.0>: "serverList": ["127.0.0.1:11210"], moxi<0.3684.0>: "vBucketMap": [] moxi<0.3684.0>: } moxi<0.3684.0>: }) [views:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]}, {passive,[]}, {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69, 70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {replica,[]}] [ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "default" on 'ns_1@127.0.0.1' from missing to active. 
[views:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 6 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 7 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 8 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 9 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 10 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 11 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 12 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 13 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 14 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 15 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 16 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 17 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 18 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 19 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 20 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 21 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 22 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 23 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 24 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 25 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 26 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 27 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 28 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 29 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 30 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 31 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 32 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 33 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 34 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 35 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 36 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 37 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 38 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 39 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 40 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 41 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 42 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 43 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 44 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 45 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 46 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 47 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 48 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 49 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 50 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 51 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 52 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 53 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 54 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 55 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 56 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 57 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 58 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 59 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 60 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 61 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 62 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 63 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 64 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 65 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 66 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 67 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 68 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 69 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 70 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 71 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 72 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 73 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 74 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 75 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 76 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 77 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 78 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 79 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 80 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 81 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 82 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 83 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 84 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 85 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 86 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 87 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 88 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 89 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 90 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 91 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 92 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 93 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 94 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 95 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 96 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 97 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 98 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 99 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 100 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 101 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 102 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 103 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 104 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 105 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 106 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 107 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 108 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 109 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 110 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 111 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 112 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 113 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 114 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 115 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 116 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 117 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 118 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 119 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 120 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 121 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 122 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 123 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 124 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 125 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 126 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 127 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 128 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 129 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 130 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 131 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 132 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 133 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 134 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 135 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 136 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 137 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 138 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 139 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 140 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 141 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 142 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 143 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 144 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 145 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 146 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 147 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 148 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 149 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 150 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 151 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 152 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 153 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 154 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 155 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 156 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 157 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 158 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 159 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 160 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 161 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 162 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 163 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 164 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 165 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 166 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 167 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 168 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 169 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 170 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 171 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 172 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 173 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 174 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 175 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 176 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 177 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 178 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 179 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 180 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 181 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 182 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 183 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 184 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 185 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 186 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 187 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 188 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 189 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 190 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 191 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 192 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 193 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 194 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 195 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 196 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 197 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 198 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 199 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 200 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 201 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 202 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 203 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 204 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 205 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 206 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 207 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 208 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 209 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 210 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 211 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 212 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 213 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 214 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 215 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 216 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 217 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 218 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 219 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 220 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 221 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 222 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 223 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 224 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 225 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 226 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 227 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 228 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 229 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 230 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 231 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 232 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 233 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 234 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 235 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 236 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 237 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 238 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 239 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 240 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 241 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:20:29] [ns_1@127.0.0.1:<0.3756.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "default" on 'ns_1@127.0.0.1' from missing to active.
[views:info] [2012-04-10 18:20:33] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}]
[views:info] [2012-04-10 18:20:33] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.3750.0>:couch_log:info:39] 10.1.2.49 - - PUT /default/_design/dev_test_view_on_1000_docs-040721b 201
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] Calling couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, {set_view_params,256,[],[],true}])
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] couch_set_view spawned worker {<0.6447.0>,#Ref<0.0.0.83751>} to open set view group `_design/dev_test_view_on_1000_docs-040721b`, set `default`, signature `19c3fedf0e6322b0d0671bd30b07cd1b`, new waiting list: [{<0.3800.0>,#Ref<0.0.0.83750>}]
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6448.0>:couch_log:info:39] Started undefined main set view group `default`, group `_design/dev_test_view_on_1000_docs-040721b`
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6447.0>:couch_log:info:39] couch_set_view opener worker <0.6447.0> for set view group `_design/dev_test_view_on_1000_docs-040721b`, set `default`, signature `19c3fedf0e6322b0d0671bd30b07cd1b`, finishing with reply {ok, <0.6448.0>}
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6448.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_1000_docs-040721b`, linked PID <0.6449.0> stopped normally
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] couch_set_view set view group `_design/dev_test_view_on_1000_docs-040721b`, set `default`, signature `19c3fedf0e6322b0d0671bd30b07cd1b`, opener worker {#Ref<0.0.0.83751>,<0.6447.0>} finished. Replying with {ok,<0.6448.0>} to waiting list: [{<0.3800.0>,#Ref<0.0.0.83750>}]
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6459.0>:couch_log:info:39] Started undefined replica set view group `default`, group `_design/dev_test_view_on_1000_docs-040721b`
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6459.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_1000_docs-040721b`, linked PID <0.6460.0> stopped normally
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6459.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_1000_docs-040721b`, configured with: 256 partitions no replica support initial active partitions [] initial passive partitions []
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6448.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_1000_docs-040721b`, configured with: 256 partitions replica support initial active partitions [] initial passive partitions []
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, {set_view_params,256,[],[],true}]) returned ok in 17ms
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:393] Applying map to bucket default (ddoc _design/dev_test_view_on_1000_docs-040721b): [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}]
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:399] Classified vbuckets for "default" (ddoc _design/dev_test_view_on_1000_docs-040721b): Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:418] Calling couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255], [],[]])
[couchdb:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:<0.6448.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_1000_docs-040721b`, partition states updated active partitions before: [] active partitions after: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] passive partitions before: [] passive partitions after: [] cleanup partitions before: [] cleanup partitions after: [] replica partitions before: [] replica partitions after: [] replicas on transfer before: [] replicas on transfer after: []
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:419] couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255], [],[]]) returned ok in 35ms
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:423] Calling couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, []])
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:424] couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, []]) returned ok in 0ms
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:425] Calling couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, []])
[views:info] [2012-04-10 18:20:39] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:426] couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_1000_docs-040721b">>, []]) returned ok in 0ms
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.4081.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_1000_docs-040721b/_view/dev_test_view_on_1000_docs-040721b?connection_timeout=60000&full_set=true&stale=update_after 200
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6448.0>:couch_log:info:39] Starting updater for set view `default`, main group `_design/dev_test_view_on_1000_docs-040721b`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6509.0>:couch_log:info:39] Updater for set view `default`, main group `_design/dev_test_view_on_1000_docs-040721b` started
Active partitions: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive partitions: []
Active partitions update seqs:
[{0,1},{1,2},{2,5},{3,7},{4,7},{5,5},{6,2},{7,1},{8,3},{9,2},{10,8},{11,5},{12,5},{13,8},{14,2},{15,3},{16,5},{17,7},{18,2},{19,4},{20,4},{21,2},{22,7},{23,5},{24,7},{25,5},{26,3},{27,1},{28,1},{29,3},{30,5},{31,7},{32,6},{33,6},{34,3},{35,2},{36,2},{37,3},{38,6},{39,6},{40,6},{41,6},{42,1},{43,2},{44,2},{45,1},{46,6},{47,6},{48,2},{49,1},{50,6},{51,6},{52,6},{53,6},{54,1},{55,2},{56,2},{57,3},{58,6},{59,6},{60,6},{61,6},{62,3},{63,2},{64,3},{65,1},{66,6},{67,6},{68,6},{69,6},{70,1},{71,3},{72,3},{73,4},{74,6},{75,6},{76,6},{77,6},{78,4},{79,3},{80,6},{81,6},{82,4},{83,3},{84,3},{85,4},{86,6},{87,6},{88,6},{89,6},{90,1},{91,3},{92,3},{93,1},{94,6},{95,6},{96,8},{97,5},{98,1},{99,3},{100,3},{101,1},{102,5},{103,8},{104,5},{105,7},{106,2},{107,0},{108,0},{109,2},{110,7},{111,5},{112,1},{113,2},{114,7},{115,5},{116,5},{117,7},{118,2},{119,1},{120,3},{121,2},{122,5},{123,8},{124,8},{125,5},{126,2},{127,3},{128,6},{129,5},{130,2},{131,0},{132,0},{133,2},{134,5},{135,6},{136,6},{137,6},{138,2},{139,3},{140,3},{141,2},{142,6},{143,6},{144,3},{145,2},{146,6},{147,6},{148,6},{149,6},{150,2},{151,3},{152,0},{153,2},{154,5},{155,6},{156,6},{157,5},{158,2},{159,0},{160,2},{161,2},{162,7},{163,4},{164,4},{165,7},{166,2},{167,2},{168,2},{169,1},{170,4},{171,6},{172,6},{173,4},{174,1},{175,2},{176,6},{177,4},{178,2},{179,2},{180,2},{181,2},{182,4},{183,6},{184,4},{185,6},{186,2},{187,3},{188,3},{189,2},{190,6},{191,4},{192,4},{193,6},{194,0},{195,4},{196,4},{197,0},{198,6},{199,4},{200,6},{201,5},{202,4},{203,1},{204,1},{205,4},{206,5},{207,6},{208,1},{209,5},{210,5},{211,6},{212,6},{213,5},{214,5},{215,1},{216,5},{217,0},{218,6},{219,4},{220,4},{221,6},{222,0},{223,5},{224,2},{225,1},{226,5},{227,7},{228,7},{229,5},{230,1},{231,2},{232,0},{233,2},{234,6},{235,5},{236,5},{237,6},{238,2},{239,0},{240,5},{241,6},{242,2},{243,0},{244,0},{245,2},{246,6},{247,5},{248,7},{249,5},{250,1},{251,2},{252,2},{253,1},{254,5},{255,7}] Active partitions indexed update seqs: 
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0},{19,0},{20,0},{21,0},{22,0},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,0},{35,0},{36,0},{37,0},{38,0},{39,0},{40,0},{41,0},{42,0},{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,0},{56,0},{57,0},{58,0},{59,0},{60,0},{61,0},{62,0},{63,0},{64,0},{65,0},{66,0},{67,0},{68,0},{69,0},{70,0},{71,0},{72,0},{73,0},{74,0},{75,0},{76,0},{77,0},{78,0},{79,0},{80,0},{81,0},{82,0},{83,0},{84,0},{85,0},{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0},{93,0},{94,0},{95,0},{96,0},{97,0},{98,0},{99,0},{100,0},{101,0},{102,0},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,0},{123,0},{124,0},{125,0},{126,0},{127,0},{128,0},{129,0},{130,0},{131,0},{132,0},{133,0},{134,0},{135,0},{136,0},{137,0},{138,0},{139,0},{140,0},{141,0},{142,0},{143,0},{144,0},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,0},{152,0},{153,0},{154,0},{155,0},{156,0},{157,0},{158,0},{159,0},{160,0},{161,0},{162,0},{163,0},{164,0},{165,0},{166,0},{167,0},{168,0},{169,0},{170,0},{171,0},{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0},{180,0},{181,0},{182,0},{183,0},{184,0},{185,0},{186,0},{187,0},{188,0},{189,0},{190,0},{191,0},{192,0},{193,0},{194,0},{195,0},{196,0},{197,0},{198,0},{199,0},{200,0},{201,0},{202,0},{203,0},{204,0},{205,0},{206,0},{207,0},{208,0},{209,0},{210,0},{211,0},{212,0},{213,0},{214,0},{215,0},{216,0},{217,0},{218,0},{219,0},{220,0},{221,0},{222,0},{223,0},{224,0},{225,0},{226,0},{227,0},{228,0},{229,0},{230,0},{231,0},{232,0},{233,0},{234,0},{235,0},{236,0},{237,0},{238,0},{239,0},{240,0},{241,0},{242,0},{243,0},{244,0},{245,0},{246,0},{247,0},{248,0},{249,0},{250,0},{251,0},{252,0},{253,0},{254,0},{255,0}] Passive partitions update seqs: [] Passive partitions indexed update seqs: [] Active partitions # docs: 
[{0,1},{1,2},{2,5},{3,7},{4,7},{5,5},{6,2},{7,1},{8,3},{9,2},{10,8},{11,5},{12,5},{13,8},{14,2},{15,3},{16,5},{17,7},{18,2},{19,4},{20,4},{21,2},{22,7},{23,5},{24,7},{25,5},{26,3},{27,1},{28,1},{29,3},{30,5},{31,7},{32,6},{33,6},{34,3},{35,2},{36,2},{37,3},{38,6},{39,6},{40,6},{41,6},{42,1},{43,2},{44,2},{45,1},{46,6},{47,6},{48,2},{49,1},{50,6},{51,6},{52,6},{53,6},{54,1},{55,2},{56,2},{57,3},{58,6},{59,6},{60,6},{61,6},{62,3},{63,2},{64,3},{65,1},{66,6},{67,6},{68,6},{69,6},{70,1},{71,3},{72,3},{73,4},{74,6},{75,6},{76,6},{77,6},{78,4},{79,3},{80,6},{81,6},{82,4},{83,3},{84,3},{85,4},{86,6},{87,6},{88,6},{89,6},{90,1},{91,3},{92,3},{93,1},{94,6},{95,6},{96,8},{97,5},{98,1},{99,3},{100,3},{101,1},{102,5},{103,8},{104,5},{105,7},{106,2},{107,0},{108,0},{109,2},{110,7},{111,5},{112,1},{113,2},{114,7},{115,5},{116,5},{117,7},{118,2},{119,1},{120,3},{121,2},{122,5},{123,8},{124,8},{125,5},{126,2},{127,3},{128,6},{129,5},{130,2},{131,0},{132,0},{133,2},{134,5},{135,6},{136,6},{137,6},{138,2},{139,3},{140,3},{141,2},{142,6},{143,6},{144,3},{145,2},{146,6},{147,6},{148,6},{149,6},{150,2},{151,3},{152,0},{153,2},{154,5},{155,6},{156,6},{157,5},{158,2},{159,0},{160,2},{161,2},{162,7},{163,4},{164,4},{165,7},{166,2},{167,2},{168,2},{169,1},{170,4},{171,6},{172,6},{173,4},{174,1},{175,2},{176,6},{177,4},{178,2},{179,2},{180,2},{181,2},{182,4},{183,6},{184,4},{185,6},{186,2},{187,3},{188,3},{189,2},{190,6},{191,4},{192,4},{193,6},{194,0},{195,4},{196,4},{197,0},{198,6},{199,4},{200,6},{201,5},{202,4},{203,1},{204,1},{205,4},{206,5},{207,6},{208,1},{209,5},{210,5},{211,6},{212,6},{213,5},{214,5},{215,1},{216,5},{217,0},{218,6},{219,4},{220,4},{221,6},{222,0},{223,5},{224,2},{225,1},{226,5},{227,7},{228,7},{229,5},{230,1},{231,2},{232,0},{233,2},{234,6},{235,5},{236,5},{237,6},{238,2},{239,0},{240,5},{241,6},{242,2},{243,0},{244,0},{245,2},{246,6},{247,5},{248,7},{249,5},{250,1},{251,2},{252,2},{253,1},{254,5},{255,7}] Active partitions # deleted docs: 
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0},{19,0},{20,0},{21,0},{22,0},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,0},{35,0},{36,0},{37,0},{38,0},{39,0},{40,0},{41,0},{42,0},{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,0},{56,0},{57,0},{58,0},{59,0},{60,0},{61,0},{62,0},{63,0},{64,0},{65,0},{66,0},{67,0},{68,0},{69,0},{70,0},{71,0},{72,0},{73,0},{74,0},{75,0},{76,0},{77,0},{78,0},{79,0},{80,0},{81,0},{82,0},{83,0},{84,0},{85,0},{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0},{93,0},{94,0},{95,0},{96,0},{97,0},{98,0},{99,0},{100,0},{101,0},{102,0},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,0},{123,0},{124,0},{125,0},{126,0},{127,0},{128,0},{129,0},{130,0},{131,0},{132,0},{133,0},{134,0},{135,0},{136,0},{137,0},{138,0},{139,0},{140,0},{141,0},{142,0},{143,0},{144,0},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,0},{152,0},{153,0},{154,0},{155,0},{156,0},{157,0},{158,0},{159,0},{160,0},{161,0},{162,0},{163,0},{164,0},{165,0},{166,0},{167,0},{168,0},{169,0},{170,0},{171,0},{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0},{180,0},{181,0},{182,0},{183,0},{184,0},{185,0},{186,0},{187,0},{188,0},{189,0},{190,0},{191,0},{192,0},{193,0},{194,0},{195,0},{196,0},{197,0},{198,0},{199,0},{200,0},{201,0},{202,0},{203,0},{204,0},{205,0},{206,0},{207,0},{208,0},{209,0},{210,0},{211,0},{212,0},{213,0},{214,0},{215,0},{216,0},{217,0},{218,0},{219,0},{220,0},{221,0},{222,0},{223,0},{224,0},{225,0},{226,0},{227,0},{228,0},{229,0},{230,0},{231,0},{232,0},{233,0},{234,0},{235,0},{236,0},{237,0},{238,0},{239,0},{240,0},{241,0},{242,0},{243,0},{244,0},{245,0},{246,0},{247,0},{248,0},{249,0},{250,0},{251,0},{252,0},{253,0},{254,0},{255,0}]
Passive partitions # docs: []
Passive partitions # deleted docs: []
Replicas to transfer: []
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/0 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/1 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/2 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/3 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/4 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/5 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/6 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/7 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/8 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/9 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/10 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/11 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/12 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/13 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/14 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/15 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/16 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/17 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/18 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/19 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/20 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/21 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/22 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/23 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/24 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/25 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/26 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/27 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/28 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/29 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/30 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/31 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/32 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/33 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/34 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/35 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/36 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/37 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/38 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/39 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/40 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/41 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/42 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/43 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/44 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/45 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/46 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/47 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/48 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/49 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/50 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/51 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/52 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/53 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/54 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/55 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/56 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/57 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/58 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/59 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/60 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/61 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/62 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/63 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/64 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/65 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/66 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/67 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/68 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/69 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/70 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/71 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/72 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/73 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/74 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/75 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/76 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/77 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/78 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/79 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/80 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/81 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/82 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/83 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/84 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/85 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/86 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/87 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/88 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/89 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/90 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/91 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/92 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/93 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/94 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/95 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/96 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/97 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/98 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/99 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/100 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/101 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/102 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/103 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/104 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/105 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/106 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/107 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/108 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/109 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/110 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/111 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/112 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/113 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/114 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/115 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/116 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/117 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/118 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/119 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/120 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/121 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/122 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/123 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/124 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/125 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/126 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/127 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/128 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/129 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/130 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/131 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/132 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/133 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/134 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/135 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/136 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/137 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/138 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/139 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/140 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/141 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/142 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/143 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/144 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/145 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/146 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/147 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/148 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/149 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/150 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/151 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/152 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/153 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/154 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/155 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/156 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/157 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/158 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/159 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/160 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/161 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/162 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/163 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/164 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default`
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading
changes (since sequence 0) from active partition default/165 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/166 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/167 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/168 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/169 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/170 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/171 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/172 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/173 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/174 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/175 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/176 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/177 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/178 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/179 to update main set view group 
`_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/180 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/181 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/182 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/183 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/184 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/185 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/186 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/187 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/188 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/189 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/190 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/191 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/192 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/193 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] 
[ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/194 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/195 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/196 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/197 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/198 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/199 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/200 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/201 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/202 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/203 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/204 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/205 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/206 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/207 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition 
default/208 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/209 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/210 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/211 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/212 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/213 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/214 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/215 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/216 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/217 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/218 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/219 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/220 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/221 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/222 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` 
[couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/223 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/224 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/225 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/226 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/227 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/228 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/229 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/230 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/231 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/232 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/233 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/234 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/235 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/236 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since 
sequence 0) from active partition default/237 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/238 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/239 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/240 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/241 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/242 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/243 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/244 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/245 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/246 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/247 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/248 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/249 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/250 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/251 to update main set view group 
`_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/252 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/253 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/254 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6515.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/255 to update main set view group `_design/dev_test_view_on_1000_docs-040721b` from set `default` [couchdb:info] [2012-04-10 18:20:45] [ns_1@127.0.0.1:<0.6448.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_1000_docs-040721b`, updater finished Indexing time: 0.540 seconds Blocked time: 0.000 seconds Inserted IDs: 1000 Deleted IDs: 0 Inserted KVs: 1000 Deleted KVs: 0 Cleaned KVs: 0 [couchdb:info] [2012-04-10 18:20:55] [ns_1@127.0.0.1:<0.4094.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_1000_docs-040721b/_view/dev_test_view_on_1000_docs-040721b?connection_timeout=60000&full_set=true&stale=update_after 200 [couchdb:info] [2012-04-10 18:21:08] [ns_1@127.0.0.1:<0.4068.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_1000_docs-040721b 200 [couchdb:info] [2012-04-10 18:21:08] [ns_1@127.0.0.1:<0.6506.0>:couch_log:info:39] 10.1.2.49 - - DELETE /default/_design/dev_test_view_on_1000_docs-040721b 200 [couchdb:info] [2012-04-10 18:21:08] [ns_1@127.0.0.1:<0.6448.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_1000_docs-040721b`, terminating with reason: normal [couchdb:info] [2012-04-10 18:21:08] [ns_1@127.0.0.1:<0.6459.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_1000_docs-040721b`, terminating with reason: shutdown [user:info] [2012-04-10 18:21:08] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:350] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion [couchdb:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:<0.6495.0>:couch_log:info:39] Shutting down spatial group server, monitored db is closing. 
[ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok
[couchdb:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] Deleting index files for set `default` because database partition `default/master` was deleted
[ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/0">>: ok
[... the same entry is logged for each remaining vbucket database in lexicographic order, <<"default/1">> through <<"default/187">>, all at 18:21:09 ...]
[ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: Shutting down tap connections!
[... identical deletion entries continue in lexicographic order, <<"default/188">> through <<"default/70">> ...]
[ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database
<<"default/71">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/72">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/73">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/74">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/75">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/76">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/77">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/78">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/79">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/8">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/80">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/81">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/82">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/83">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/84">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/85">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/86">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/87">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/88">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/89">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/9">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/90">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/91">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database 
<<"default/92">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/93">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/94">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/95">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/96">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/97">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/98">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/99">>: ok [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:<0.345.0>:ns_orchestrator:idle:345] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:<0.6868.0>:ns_port_sup:restart_port:134] restarting port: {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]} [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:<0.3684.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:<0.3684.0>:ns_port_server:log:166] moxi<0.3684.0>: EOL on stdin. 
[menelaus:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:<0.6551.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default"
[error_logger:info] [2012-04-10 18:21:09] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,ns_port_sup}
             started: [{pid,<0.6869.0>},
                       {name,{moxi,"/opt/couchbase/bin/moxi",
                              ["-Z",
                               "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
                               "-z",
                               "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming",
                               "-p","0","-Y","y","-O","stderr",[]],
                              [{env,[{"EVENT_NOSELECT","1"},
                                     {"MOXI_SASL_PLAIN_USR","Administrator"},
                                     {"MOXI_SASL_PLAIN_PWD","password"}]},
                               use_stdio,exit_status,port_server_send_eol,
                               stderr_to_stdout,stream]}},
                       {mfargs,{supervisor_cushion,start_link,
                                [moxi,5000,ns_port_server,start_link,
                                 [moxi,"/opt/couchbase/bin/moxi",
                                  ["-Z",
                                   "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200",
                                   "-z",
                                   "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming",
                                   "-p","0","-Y","y","-O","stderr",[]],
                                  [{env,[{"EVENT_NOSELECT","1"},
                                         {"MOXI_SASL_PLAIN_USR","Administrator"},
                                         {"MOXI_SASL_PLAIN_PWD","password"}]},
                                   use_stdio,exit_status,port_server_send_eol,
                                   stderr_to_stdout,stream]]]}},
                       {restart_type,permanent},
                       {shutdown,10000},
                       {child_type,worker}]
[ns_server:info] [2012-04-10 18:21:10] [ns_1@127.0.0.1:<0.6870.0>:ns_port_server:log:166] moxi<0.6870.0>: 2012-04-10 18:21:09: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13)
moxi<0.6870.0>: 2012-04-10 18:21:09: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
[menelaus:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:<0.6472.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase
[ns_server:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@127.0.0.1']
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,ns_bucket_sup}
             started: [{pid,<0.6945.0>},
                       {name,{per_bucket_sup,"default"}},
                       {mfargs,{single_bucket_sup,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,infinity},
                       {child_type,supervisor}]
[stats:error] [2012-04-10 18:21:23] [ns_1@127.0.0.1:<0.6474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[ns_server:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:713] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=1435500544;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;alog_path=/opt/couchbase/var/lib/couchdb/default/access.log;vb0=false;waitforwarmup=false;failpartialwarmup=false;"
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6948.0>},
                       {name,{ns_memcached,stats,"default"}},
                       {mfargs,{ns_memcached,start_link,[{"default",stats}]}},
                       {restart_type,permanent},
                       {shutdown,86400000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6958.0>},
                       {name,{ns_memcached,data,"default"}},
                       {mfargs,{ns_memcached,start_link,[{"default",data}]}},
                       {restart_type,permanent},
                       {shutdown,86400000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6959.0>},
                       {name,{ns_vbm_sup,"default"}},
                       {mfargs,{ns_vbm_sup,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,supervisor}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6960.0>},
                       {name,{ns_vbm_new_sup,"default"}},
                       {mfargs,{ns_vbm_new_sup,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,supervisor}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6961.0>},
                       {name,{couch_stats_reader,"default"}},
                       {mfargs,{couch_stats_reader,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6962.0>},
                       {name,{stats_collector,"default"}},
                       {mfargs,{stats_collector,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6964.0>},
                       {name,{stats_archiver,"default"}},
                       {mfargs,{stats_archiver,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6966.0>},
                       {name,{stats_reader,"default"}},
                       {mfargs,{stats_reader,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.6967.0>},
                       {name,{failover_safeness_level,"default"}},
                       {mfargs,{failover_safeness_level,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'single_bucket_sup-default'}
             started: [{pid,<0.6947.0>},
                       {name,{ns_memcached_sup,"default"}},
                       {mfargs,{ns_memcached_sup,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,infinity},
                       {child_type,supervisor}]
[error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'single_bucket_sup-default'}
             started: [{pid,<0.6968.0>},
                       {name,{capi_ddoc_replication_srv,"default"}},
                       {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 1 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 2 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 3 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 4 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 5 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 6 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 146 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 166 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 186 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found,no_db_file}
[couchdb:info]
[2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found,no_db_file} [views:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-04-10 18:21:23] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.6982.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6870.0>:ns_port_server:log:166] moxi<0.6870.0>: 2012-04-10 18:21:25: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.6870.0>: "name": "default", moxi<0.6870.0>: "nodeLocator": "vbucket", moxi<0.6870.0>: "saslPassword": "", moxi<0.6870.0>: "nodes": [{ moxi<0.6870.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.6870.0>: "replication": 0, moxi<0.6870.0>: "clusterMembership": "active", moxi<0.6870.0>: "status": "warmup", moxi<0.6870.0>: "thisNode": true, moxi<0.6870.0>: "hostname": "127.0.0.1:8091", moxi<0.6870.0>: "clusterCompatibility": 1, moxi<0.6870.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.6870.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.6870.0>: "ports": { moxi<0.6870.0>: "proxy": 11211, moxi<0.6870.0>: "direct": 11210 moxi<0.6870.0>: } moxi<0.6870.0>: }], moxi<0.6870.0>: "vBucketServerMap": { moxi<0.6870.0>: "hashAlgorithm": "CRC", moxi<0.6870.0>: "numReplicas": 1, moxi<0.6870.0>: "serverList": ["127.0.0.1:11210"], moxi<0.6870.0>: "vBucketMap": [] moxi<0.6870.0>: } moxi<0.6870.0>: }) [ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: Connected to mccouch: "localhost:11213" memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: Connected to mccouch: "localhost:11213" memcached<0.396.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.396.0>: Failed to load mutation log, falling back to key dump memcached<0.396.0>: metadata loaded in 306 usec memcached<0.396.0>: warmup completed in 457 usec [user:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:handle_info:312] Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds. 
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@127.0.0.1': ["default"]
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6870.0>:ns_port_server:log:166] moxi<0.6870.0>: 2012-04-10 18:21:26: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({
moxi<0.6870.0>: "name": "default",
moxi<0.6870.0>: "nodeLocator": "vbucket",
moxi<0.6870.0>: "saslPassword": "",
moxi<0.6870.0>: "nodes": [{
moxi<0.6870.0>: "couchApiBase": "http://127.0.0.1:8092/default",
moxi<0.6870.0>: "replication": 0,
moxi<0.6870.0>: "clusterMembership": "active",
moxi<0.6870.0>: "status": "healthy",
moxi<0.6870.0>: "thisNode": true,
moxi<0.6870.0>: "hostname": "127.0.0.1:8091",
moxi<0.6870.0>: "clusterCompatibility": 1,
moxi<0.6870.0>: "version": "2.0.0r-1065-rel-enterprise",
moxi<0.6870.0>: "os": "x86_64-unknown-linux-gnu",
moxi<0.6870.0>: "ports": {
moxi<0.6870.0>: "proxy": 11211,
moxi<0.6870.0>: "direct": 11210
moxi<0.6870.0>: }
moxi<0.6870.0>: }],
moxi<0.6870.0>: "vBucketServerMap": {
moxi<0.6870.0>: "hashAlgorithm": "CRC",
moxi<0.6870.0>: "numReplicas": 1,
moxi<0.6870.0>: "serverList": ["127.0.0.1:11210"],
moxi<0.6870.0>: "vBucketMap": []
moxi<0.6870.0>: }
moxi<0.6870.0>: })
[views:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default:
[{active,[]},
 {passive,[]},
 {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
          26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
          48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,
          70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,
          92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,
          111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
          128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,
          145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
          162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,
          179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,
          196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,
          213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,
          230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,
          247,248,249,250,251,252,253,254,255]},
 {replica,[]}]
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "default" on 'ns_1@127.0.0.1' from missing to active.
[views:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: []
Passive: []
Cleanup: []
Replica: []
ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 6 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 7 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 8 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 9 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 10 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 11 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 12 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 13 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 14 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 15 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 16 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 17 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 18 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 19 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 20 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 21 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 22 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 23 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 24 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 25 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 26 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 27 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 28 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 29 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 30 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 31 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 32 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 33 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 34 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 35 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 36 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 37 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 38 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 39 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 40 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 41 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 42 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 43 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 44 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 45 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 46 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 47 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 48 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 49 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 50 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 51 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 52 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 53 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 54 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 55 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 56 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 57 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 58 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 59 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 60 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 61 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 62 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 63 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 64 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 65 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 66 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 67 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 68 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 69 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 70 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 71 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 72 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 73 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 74 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 75 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 76 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 77 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 78 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 79 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 80 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 81 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 82 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 83 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 84 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 85 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 86 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 87 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 88 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 89 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 90 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 91 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 92 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 93 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 94 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 95 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 96 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 97 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 98 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 99 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 100 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 101 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 102 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 103 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 104 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 105 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 106 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 107 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 108 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 109 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 110 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 111 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 112 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 113 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 114 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 115 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 116 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 117 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 118 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 119 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 120 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 121 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 122 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 123 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 124 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 125 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 126 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 127 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 128 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 129 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 130 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 131 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 132 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 133 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 134 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 135 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 136 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 137 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 138 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 139 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 140 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 141 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 142 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 143 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 144 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 145 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 146 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 147 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 148 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 149 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 150 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 151 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 152 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 153 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 154 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 155 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 156 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 157 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 158 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 159 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 160 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 161 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 162 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 163 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 164 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 165 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 166 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 167 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 168 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 169 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 170 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 171 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 172 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 173 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 174 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 175 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 176 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 177 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 178 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 179 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 180 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 181 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 182 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 183 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 184 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 185 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 186 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 187 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 188 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 189 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 190 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 191 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 192 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 193 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 194 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 195 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 196 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 197 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 198 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 199 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 200 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 201 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 202 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 203 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 204 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 205 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 206 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 207 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 208 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 209 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 210 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 211 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 212 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 213 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 214 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 215 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 216 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 217 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 218 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 219 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 220 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 221 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 222 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 223 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 224 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 225 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 226 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 227 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 228 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 229 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 230 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 231 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 232 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 233 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 234 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 235 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 236 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 237 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 238 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 239 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 240 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 241 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 242 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 243 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 244 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 245 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 246 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 247 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 248 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 249 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 250 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 251 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 252 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 253 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 254 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:21:24] [ns_1@127.0.0.1:<0.6938.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "default" on 'ns_1@127.0.0.1' from missing to active.
[views:info] [2012-04-10 18:21:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}]
[views:info] [2012-04-10 18:21:28] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: []
Replica: []
ReplicaCleanup: []
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.6932.0>:couch_log:info:39] 10.1.2.49 - - PUT /default/_design/dev_test_view_on_10000_docs-cbcea2a 201
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] Calling couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, {set_view_params,256,[],[],true}])
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] couch_set_view spawned worker {<0.9631.0>,#Ref<0.0.0.145737>} to open set view group `_design/dev_test_view_on_10000_docs-cbcea2a`, set `default`, signature `a69e9a8d6b14713a2eb9ffd1266e49c3`, new waiting list: [{<0.6982.0>,#Ref<0.0.0.145736>}]
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9632.0>:couch_log:info:39] Started undefined main set view group `default`, group `_design/dev_test_view_on_10000_docs-cbcea2a`
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9631.0>:couch_log:info:39] couch_set_view opener worker <0.9631.0> for set view group `_design/dev_test_view_on_10000_docs-cbcea2a`, set `default`, signature `a69e9a8d6b14713a2eb9ffd1266e49c3`, finishing with reply {ok, <0.9632.0>}
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9632.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_10000_docs-cbcea2a`, linked PID <0.9633.0> stopped normally
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] couch_set_view set view group `_design/dev_test_view_on_10000_docs-cbcea2a`, set `default`, signature `a69e9a8d6b14713a2eb9ffd1266e49c3`, opener worker {#Ref<0.0.0.145737>,<0.9631.0>} finished. Replying with {ok,<0.9632.0>} to waiting list: [{<0.6982.0>,#Ref<0.0.0.145736>}]
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9643.0>:couch_log:info:39] Started undefined replica set view group `default`, group `_design/dev_test_view_on_10000_docs-cbcea2a`
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9643.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_10000_docs-cbcea2a`, linked PID <0.9644.0> stopped normally
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9643.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_10000_docs-cbcea2a`, configured with:
256 partitions
no replica support
initial active partitions []
initial passive partitions []
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9632.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_10000_docs-cbcea2a`, configured with:
256 partitions
replica support
initial active partitions []
initial passive partitions []
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, {set_view_params,256,[],[],true}]) returned ok in 16ms
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:393] Applying map to bucket default (ddoc _design/dev_test_view_on_10000_docs-cbcea2a): [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[]}]
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:399] Classified vbuckets for "default" (ddoc _design/dev_test_view_on_10000_docs-cbcea2a):
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: []
Replica: []
ReplicaCleanup: []
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:418] Calling couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255], [],[]])
[couchdb:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:<0.9632.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_10000_docs-cbcea2a`, partition states updated
active partitions before: []
active partitions after: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
passive partitions before: []
passive partitions after: []
cleanup partitions before: []
cleanup partitions after: []
replica partitions before: []
replica partitions after: []
replicas on transfer before: []
replicas on transfer after: []
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:419] couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255], [],[]]) returned ok in 41ms
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:423] Calling couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, []])
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:424] couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, []]) returned ok in 1ms
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:425] Calling couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, []])
[views:info] [2012-04-10 18:21:35] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:426] couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view_on_10000_docs-cbcea2a">>, []]) returned ok in 0ms
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.7248.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_10000_docs-cbcea2a/_view/dev_test_view_on_10000_docs-cbcea2a?connection_timeout=60000&full_set=true&stale=update_after 200
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9632.0>:couch_log:info:39] Starting updater for set view `default`, main group `_design/dev_test_view_on_10000_docs-cbcea2a`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9704.0>:couch_log:info:39] Updater for set view `default`, main group `_design/dev_test_view_on_10000_docs-cbcea2a` started
Active partitions: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive partitions: []
Active partitions update seqs:
[{0,21},{1,9},{2,58},{3,62},{4,62},{5,58},{6,9},{7,21},{8,9},{9,20},{10,62},{11,58},{12,58},{13,62},{14,20},{15,9},{16,58},{17,62},{18,20},{19,9},{20,9},{21,20},{22,62},{23,58},{24,62},{25,58},{26,9},{27,21},{28,21},{29,9},{30,58},{31,62},{32,62},{33,65},{34,11},{35,22},{36,22},{37,11},{38,65},{39,62},{40,65},{41,62},{42,24},{43,11},{44,11},{45,24},{46,62},{47,65},{48,11},{49,24},{50,62},{51,65},{52,65},{53,62},{54,24},{55,11},{56,22},{57,11},{58,65},{59,62},{60,62},{61,65},{62,11},{63,22},{64,10},{65,21},{66,65},{67,62},{68,62},{69,65},{70,21},{71,10},{72,21},{73,11},{74,62},{75,65},{76,65},{77,62},{78,11},{79,21},{80,65},{81,62},{82,11},{83,21},{84,21},{85,11},{86,62},{87,65},{88,62},{89,65},{90,21},{91,10},{92,10},{93,21},{94,65},{95,62},{96,62},{97,58},{98,21},{99,9},{100,9},{101,21},{102,58},{103,62},{104,58},{105,62},{106,10},{107,23},{108,23},{109,10},{110,62},{111,58},{112,22},{113,11},{114,62},{115,58},{116,58},{117,62},{118,11},{119,22},{120,9},{121,21},{122,58},{123,62},{124,62},{125,58},{126,21},{127,9},{128,67},{129,64},{130,10},{131,20},{132,20},{133,10},{134,64},{135,67},{136,64},{137,67},{138,20},{139,11},{140,11},{141,20},{142,67},{143,64},{144,11},{145,20},{146,67},{147,64},{148,64},{149,67},{150,20},{151,11},{152,20},{153,10},{154,64},{155,67},{156,67},{157,64},{158,10},{159,20},{160,21},{161,9},{162,61},{163,61},{164,61},{165,61},{166,9},{167,21},{168,10},{169,23},{170,61},{171,61},{172,61},{173,61},{174,23},{175,10},{176,62},{177,60},{178,23},{179,10},{180,10},{181,23},{182,60},{183,62},{184,60},{185,62},{186,9},{187,21},{188,21},{189,9},{190,62},{191,60},{192,61},{193,61},{194,20},{195,9},{196,9},{197,20},{198,61},{199,61},{200,61},{201,61},{202,9},{203,19},{204,19},{205,9},{206,61},{207,61},{208,19},{209,9},{210,60},{211,62},{212,62},{213,60},{214,9},{215,19},{216,9},{217,20},{218,62},{219,60},{220,60},{221,62},{222,20},{223,9},{224,11},{225,21},{226,64},{227,67},{228,67},{229,64},{230,21},{231,11},{232,23},{233,11},{234,67},{235,64},{236,64},{237,67},{238,11},{239,23},{240,64},{241,67},{242,11},{243,23},{244,23},{245,11},{246,67},{247,64},{248,67},{249,64},{250,21},{251,11},{252,11},{253,21},{254,64},{255,67}] Active partitions indexed update seqs: 
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0},{19,0},{20,0},{21,0},{22,0},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,0},{35,0},{36,0},{37,0},{38,0},{39,0},{40,0},{41,0},{42,0},{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,0},{56,0},{57,0},{58,0},{59,0},{60,0},{61,0},{62,0},{63,0},{64,0},{65,0},{66,0},{67,0},{68,0},{69,0},{70,0},{71,0},{72,0},{73,0},{74,0},{75,0},{76,0},{77,0},{78,0},{79,0},{80,0},{81,0},{82,0},{83,0},{84,0},{85,0},{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0},{93,0},{94,0},{95,0},{96,0},{97,0},{98,0},{99,0},{100,0},{101,0},{102,0},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,0},{123,0},{124,0},{125,0},{126,0},{127,0},{128,0},{129,0},{130,0},{131,0},{132,0},{133,0},{134,0},{135,0},{136,0},{137,0},{138,0},{139,0},{140,0},{141,0},{142,0},{143,0},{144,0},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,0},{152,0},{153,0},{154,0},{155,0},{156,0},{157,0},{158,0},{159,0},{160,0},{161,0},{162,0},{163,0},{164,0},{165,0},{166,0},{167,0},{168,0},{169,0},{170,0},{171,0},{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0},{180,0},{181,0},{182,0},{183,0},{184,0},{185,0},{186,0},{187,0},{188,0},{189,0},{190,0},{191,0},{192,0},{193,0},{194,0},{195,0},{196,0},{197,0},{198,0},{199,0},{200,0},{201,0},{202,0},{203,0},{204,0},{205,0},{206,0},{207,0},{208,0},{209,0},{210,0},{211,0},{212,0},{213,0},{214,0},{215,0},{216,0},{217,0},{218,0},{219,0},{220,0},{221,0},{222,0},{223,0},{224,0},{225,0},{226,0},{227,0},{228,0},{229,0},{230,0},{231,0},{232,0},{233,0},{234,0},{235,0},{236,0},{237,0},{238,0},{239,0},{240,0},{241,0},{242,0},{243,0},{244,0},{245,0},{246,0},{247,0},{248,0},{249,0},{250,0},{251,0},{252,0},{253,0},{254,0},{255,0}] Passive partitions update seqs: [] Passive partitions indexed update seqs: [] Active partitions # docs: 
[{0,21},{1,9},{2,58},{3,62},{4,62},{5,58},{6,9},{7,21},{8,9},{9,20},{10,62},{11,58},{12,58},{13,62},{14,20},{15,9},{16,58},{17,62},{18,20},{19,9},{20,9},{21,20},{22,62},{23,58},{24,62},{25,58},{26,9},{27,21},{28,21},{29,9},{30,58},{31,62},{32,62},{33,65},{34,11},{35,22},{36,22},{37,11},{38,65},{39,62},{40,65},{41,62},{42,24},{43,11},{44,11},{45,24},{46,62},{47,65},{48,11},{49,24},{50,62},{51,65},{52,65},{53,62},{54,24},{55,11},{56,22},{57,11},{58,65},{59,62},{60,62},{61,65},{62,11},{63,22},{64,10},{65,21},{66,65},{67,62},{68,62},{69,65},{70,21},{71,10},{72,21},{73,11},{74,62},{75,65},{76,65},{77,62},{78,11},{79,21},{80,65},{81,62},{82,11},{83,21},{84,21},{85,11},{86,62},{87,65},{88,62},{89,65},{90,21},{91,10},{92,10},{93,21},{94,65},{95,62},{96,62},{97,58},{98,21},{99,9},{100,9},{101,21},{102,58},{103,62},{104,58},{105,62},{106,10},{107,23},{108,23},{109,10},{110,62},{111,58},{112,22},{113,11},{114,62},{115,58},{116,58},{117,62},{118,11},{119,22},{120,9},{121,21},{122,58},{123,62},{124,62},{125,58},{126,21},{127,9},{128,67},{129,64},{130,10},{131,20},{132,20},{133,10},{134,64},{135,67},{136,64},{137,67},{138,20},{139,11},{140,11},{141,20},{142,67},{143,64},{144,11},{145,20},{146,67},{147,64},{148,64},{149,67},{150,20},{151,11},{152,20},{153,10},{154,64},{155,67},{156,67},{157,64},{158,10},{159,20},{160,21},{161,9},{162,61},{163,61},{164,61},{165,61},{166,9},{167,21},{168,10},{169,23},{170,61},{171,61},{172,61},{173,61},{174,23},{175,10},{176,62},{177,60},{178,23},{179,10},{180,10},{181,23},{182,60},{183,62},{184,60},{185,62},{186,9},{187,21},{188,21},{189,9},{190,62},{191,60},{192,61},{193,61},{194,20},{195,9},{196,9},{197,20},{198,61},{199,61},{200,61},{201,61},{202,9},{203,19},{204,19},{205,9},{206,61},{207,61},{208,19},{209,9},{210,60},{211,62},{212,62},{213,60},{214,9},{215,19},{216,9},{217,20},{218,62},{219,60},{220,60},{221,62},{222,20},{223,9},{224,11},{225,21},{226,64},{227,67},{228,67},{229,64},{230,21},{231,11},{232,23},{233,11},{234,67},{235,64},{236,64},{237,67},{238,11},{239,23},{240,64},{241,67},{242,11},{243,23},{244,23},{245,11},{246,67},{247,64},{248,67},{249,64},{250,21},{251,11},{252,11},{253,21},{254,64},{255,67}] Active partitions # deleted docs: 
[{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0},{19,0},{20,0},{21,0},{22,0},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,0},{35,0},{36,0},{37,0},{38,0},{39,0},{40,0},{41,0},{42,0},{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0},{51,0},{52,0},{53,0},{54,0},{55,0},{56,0},{57,0},{58,0},{59,0},{60,0},{61,0},{62,0},{63,0},{64,0},{65,0},{66,0},{67,0},{68,0},{69,0},{70,0},{71,0},{72,0},{73,0},{74,0},{75,0},{76,0},{77,0},{78,0},{79,0},{80,0},{81,0},{82,0},{83,0},{84,0},{85,0},{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0},{93,0},{94,0},{95,0},{96,0},{97,0},{98,0},{99,0},{100,0},{101,0},{102,0},{103,0},{104,0},{105,0},{106,0},{107,0},{108,0},{109,0},{110,0},{111,0},{112,0},{113,0},{114,0},{115,0},{116,0},{117,0},{118,0},{119,0},{120,0},{121,0},{122,0},{123,0},{124,0},{125,0},{126,0},{127,0},{128,0},{129,0},{130,0},{131,0},{132,0},{133,0},{134,0},{135,0},{136,0},{137,0},{138,0},{139,0},{140,0},{141,0},{142,0},{143,0},{144,0},{145,0},{146,0},{147,0},{148,0},{149,0},{150,0},{151,0},{152,0},{153,0},{154,0},{155,0},{156,0},{157,0},{158,0},{159,0},{160,0},{161,0},{162,0},{163,0},{164,0},{165,0},{166,0},{167,0},{168,0},{169,0},{170,0},{171,0},{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0},{180,0},{181,0},{182,0},{183,0},{184,0},{185,0},{186,0},{187,0},{188,0},{189,0},{190,0},{191,0},{192,0},{193,0},{194,0},{195,0},{196,0},{197,0},{198,0},{199,0},{200,0},{201,0},{202,0},{203,0},{204,0},{205,0},{206,0},{207,0},{208,0},{209,0},{210,0},{211,0},{212,0},{213,0},{214,0},{215,0},{216,0},{217,0},{218,0},{219,0},{220,0},{221,0},{222,0},{223,0},{224,0},{225,0},{226,0},{227,0},{228,0},{229,0},{230,0},{231,0},{232,0},{233,0},{234,0},{235,0},{236,0},{237,0},{238,0},{239,0},{240,0},{241,0},{242,0},{243,0},{244,0},{245,0},{246,0},{247,0},{248,0},{249,0},{250,0},{251,0},{252,0},{253,0},{254,0},{255,0}]
Passive partitions # docs: []
Passive partitions # deleted docs: []
Replicas to transfer: []
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/0 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/1 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/2 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/3 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/4 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/5 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/6 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/7 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/8 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/9 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/10 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/11 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/12 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/13 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/14 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/15 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/16 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/17 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/18 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/19 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/20 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/21 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/22 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/23 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/24 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/25 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/26 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/27 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/28 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/29 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:45] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/30 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/31 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/32 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/33 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/34 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/35 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/36 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/37 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/38 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/39 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/40 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/41 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/42 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/43 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/44 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/45 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/46 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/47 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/48 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/49 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/50 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/51 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/52 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/53 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/54 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/55 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/56 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/57 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/58 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/59 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/60 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/61 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/62 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/63 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/64 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/65 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/66 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/67 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/68 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/69 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/70 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/71 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/72 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/73 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/74 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/75 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/76 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/77 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/78 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/79 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/80 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/81 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/82 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/83 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/84 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/85 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/86 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/87 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/88 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/89 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/90 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/91 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/92 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/93 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/94 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/95 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/96 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/97 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/98 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/99 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/100 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/101 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/102 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/103 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/104 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/105 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/106 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/107 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/108 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/109 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/110 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/111 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/112 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/113 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/114 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/115 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/116 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/117 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/118 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/119 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/120 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/121 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/122 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/123 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/124 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/125 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/126 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/127 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/128 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/129 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/130 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/131 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/132 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/133 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:46] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/134 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/135 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/136 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/137 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/138 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/139 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/140 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/141 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/142 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/143 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/144 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/145 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/146 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/147 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/148 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/149 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/150 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/151 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/152 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/153 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/154 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/155 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/156 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/157 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/158 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/159 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/160 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/161 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/162 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/163 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default`
[couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from
active partition default/164 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/165 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/166 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/167 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/168 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/169 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/170 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/171 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/172 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/173 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/174 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/175 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/176 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/177 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/178 to update main set view group 
`_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/179 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/180 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/181 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/182 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/183 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/184 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/185 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/186 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/187 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/188 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/189 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/190 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/191 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/192 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 
18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/193 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/194 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/195 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/196 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/197 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/198 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/199 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/200 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/201 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/202 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/203 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/204 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/205 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/206 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from 
active partition default/207 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/208 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/209 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/210 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/211 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/212 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/213 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/214 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/215 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/216 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/217 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/218 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/219 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/220 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/221 to update main set view group 
`_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/222 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/223 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/224 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/225 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/226 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/227 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/228 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/229 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/230 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/231 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/232 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/233 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/234 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/235 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 
18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/236 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:47] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/237 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/238 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/239 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/240 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/241 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/242 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/243 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/244 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/245 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/246 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/247 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/248 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/249 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from 
active partition default/250 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/251 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/252 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/253 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/254 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9710.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/255 to update main set view group `_design/dev_test_view_on_10000_docs-cbcea2a` from set `default` [couchdb:info] [2012-04-10 18:21:48] [ns_1@127.0.0.1:<0.9632.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_10000_docs-cbcea2a`, updater finished Indexing time: 2.744 seconds Blocked time: 0.000 seconds Inserted IDs: 10000 Deleted IDs: 0 Inserted KVs: 10000 Deleted KVs: 0 Cleaned KVs: 0 [couchdb:info] [2012-04-10 18:21:55] [ns_1@127.0.0.1:<0.7262.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_10000_docs-cbcea2a/_view/dev_test_view_on_10000_docs-cbcea2a?connection_timeout=60000&full_set=true&stale=update_after 200 [couchdb:info] [2012-04-10 18:22:10] [ns_1@127.0.0.1:<0.9748.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view_on_10000_docs-cbcea2a 200 [couchdb:info] [2012-04-10 18:22:10] [ns_1@127.0.0.1:<0.9752.0>:couch_log:info:39] 10.1.2.49 - - DELETE /default/_design/dev_test_view_on_10000_docs-cbcea2a 200 [couchdb:info] [2012-04-10 18:22:10] [ns_1@127.0.0.1:<0.9632.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view_on_10000_docs-cbcea2a`, terminating with reason: normal [couchdb:info] [2012-04-10 18:22:10] [ns_1@127.0.0.1:<0.9643.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view_on_10000_docs-cbcea2a`, terminating with reason: shutdown [user:info] [2012-04-10 18:22:10] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:terminate:350] Shutting down bucket "default" on 'ns_1@127.0.0.1' for deletion [couchdb:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:<0.9688.0>:couch_log:info:39] Shutting down spatial group server, monitored db is closing. 
[ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok
[couchdb:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:couch_set_view:couch_log:info:39] Deleting index files for set `default` because database partition `default/master` was deleted
[ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/0">>: ok
[... the same "Deleting database <<"default/N">>: ok" entry from ns_storage_conf:delete_database:395 repeats at 18:22:11, in lexicographic order of N, up through <<"default/178">> ...]
[ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: Shutting down tap connections!
[... "Deleting database <<"default/N">>: ok" entries continue from <<"default/179">>; the last complete entry is <<"default/91">>, after which the log is cut off mid-entry ...]
[ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database
<<"default/92">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/93">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/94">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/95">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/96">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/97">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/98">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/99">>: ok [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:<0.345.0>:ns_orchestrator:idle:345] Restarting moxi on nodes ['ns_1@127.0.0.1'] [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:<0.10068.0>:ns_port_sup:restart_port:134] restarting port: {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]} [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:<0.6870.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0 [ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:<0.6870.0>:ns_port_server:log:166] moxi<0.6870.0>: EOL on stdin. 
[menelaus:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:<0.9615.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default"
[error_logger:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,ns_port_sup}
             started: [{pid,<0.10069.0>},
                       {name,{moxi,"/opt/couchbase/bin/moxi",
                              [...same command line and environment as the restart_port entry above...]}},
                       {mfargs,{supervisor_cushion,start_link,
                                [moxi,5000,ns_port_server,start_link,
                                 [...same moxi command line and environment again...]]}},
                       {restart_type,permanent},
                       {shutdown,10000},
                       {child_type,worker}]
[ns_server:info] [2012-04-10 18:22:11] [ns_1@127.0.0.1:<0.10070.0>:ns_port_server:log:166]
moxi<0.10070.0>: 2012-04-10 18:22:11: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13)
moxi<0.10070.0>: 2012-04-10 18:22:11: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
[menelaus:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:<0.9656.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase
[ns_server:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@127.0.0.1']
[error_logger:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,ns_bucket_sup}
             started: [{pid,<0.10163.0>},
                       {name,{per_bucket_sup,"default"}},
                       {mfargs,{single_bucket_sup,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,infinity},
                       {child_type,supervisor}]
[stats:error] [2012-04-10 18:22:25] [ns_1@127.0.0.1:<0.9658.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@127.0.0.1']
[ns_server:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:ensure_bucket:713] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=1435500544;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;alog_path=/opt/couchbase/var/lib/couchdb/default/access.log;vb0=false;waitforwarmup=false;failpartialwarmup=false;"
[error_logger:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.10168.0>},
                       {name,{ns_memcached,stats,"default"}},
                       {mfargs,{ns_memcached,start_link,[{"default",stats}]}},
                       {restart_type,permanent},
                       {shutdown,86400000},
                       {child_type,worker}]
[... eight more PROGRESS REPORTs at 18:22:25 as 'ns_memcached_sup-default' starts its remaining children: {ns_memcached,data,"default"} <0.10180.0> (worker, shutdown 86400000), {ns_vbm_sup,"default"} <0.10181.0> (supervisor), {ns_vbm_new_sup,"default"} <0.10182.0> (supervisor), {couch_stats_reader,"default"} <0.10183.0>, {stats_collector,"default"} <0.10184.0>, {stats_archiver,"default"} <0.10186.0>, {stats_reader,"default"} <0.10188.0>, {failover_safeness_level,"default"} <0.10189.0> (workers, shutdown 1000); all restart_type permanent ...]
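The handle_bucket_delete and do_bucket_create entries above are the server side of REST calls against the administration port (8091). A hedged sketch of the equivalent client calls (Python with the third-party requests library; endpoint and field names follow the Couchbase 2.0 REST API, the credentials are the ones this cluster is configured with, and 1369 MB is the max_size=1435500544 bytes from the ensure_bucket config string):

    import requests

    BASE = "http://127.0.0.1:8091"
    AUTH = ("Administrator", "password")

    # Delete the bucket -- served by menelaus_web_buckets:handle_bucket_delete.
    requests.delete(BASE + "/pools/default/buckets/default", auth=AUTH)

    # Re-create it -- served by menelaus_web_buckets:do_bucket_create.
    requests.post(BASE + "/pools/default/buckets", auth=AUTH, data={
        "name": "default",
        "bucketType": "membase",   # the log reports 'type: membase'
        "ramQuotaMB": 1369,        # 1435500544 bytes / 2^20
        "authType": "sasl",
        "saslPassword": "",
        "replicaNumber": 1,
    })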
[error_logger:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'single_bucket_sup-default'}
             started: [{pid,<0.10166.0>},
                       {name,{ns_memcached_sup,"default"}},
                       {mfargs,{ns_memcached_sup,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,infinity},
                       {child_type,supervisor}]
[error_logger:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'single_bucket_sup-default'}
             started: [{pid,<0.10190.0>},
                       {name,{capi_ddoc_replication_srv,"default"}},
                       {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[couchdb:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 0 in <<"default">>: {not_found,no_db_file}
[... the same couch_log:error:42 entry repeats at 18:22:25 for vb 1 through vb 254 of the freshly created bucket, every one failing with {not_found,no_db_file} ...]
[2012-04-10 18:22:25] [ns_1@127.0.0.1:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found,no_db_file} [views:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}] [views:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: [] [error_logger:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.10204.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:<0.10070.0>:ns_port_server:log:166] moxi<0.10070.0>: 2012-04-10 18:22:27: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.10070.0>: "name": "default", moxi<0.10070.0>: "nodeLocator": "vbucket", moxi<0.10070.0>: "saslPassword": "", moxi<0.10070.0>: "nodes": [{ moxi<0.10070.0>: "couchApiBase": "http://127.0.0.1:8092/default", moxi<0.10070.0>: "replication": 0, moxi<0.10070.0>: "clusterMembership": "active", moxi<0.10070.0>: "status": "warmup", moxi<0.10070.0>: "thisNode": true, moxi<0.10070.0>: "hostname": "127.0.0.1:8091", moxi<0.10070.0>: "clusterCompatibility": 1, moxi<0.10070.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.10070.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.10070.0>: "ports": { moxi<0.10070.0>: "proxy": 11211, moxi<0.10070.0>: "direct": 11210 moxi<0.10070.0>: } moxi<0.10070.0>: }], moxi<0.10070.0>: "vBucketServerMap": { moxi<0.10070.0>: "hashAlgorithm": "CRC", moxi<0.10070.0>: "numReplicas": 1, moxi<0.10070.0>: "serverList": ["127.0.0.1:11210"], moxi<0.10070.0>: "vBucketMap": [] moxi<0.10070.0>: } moxi<0.10070.0>: }) [ns_server:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: Connected to mccouch: "localhost:11213" memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213" memcached<0.396.0>: Connected to mccouch: "localhost:11213" memcached<0.396.0>: Extension support isn't implemented in this version of bucket_engine memcached<0.396.0>: Failed to load mutation log, falling back to key dump memcached<0.396.0>: metadata loaded in 196 usec memcached<0.396.0>: warmup completed in 296 usec [user:info] [2012-04-10 18:22:25] [ns_1@127.0.0.1:'ns_memcached-default':ns_memcached:handle_info:312] Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds. 
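Two notes on the entries above, added as annotation rather than anything the log states outright. First, the {not_found,no_db_file} run is CouchDB reporting that the per-vbucket database files for "default" do not exist yet; they are only created as the bucket warms up. Second, moxi's "Number of buckets must be a power of two" complaint is its config agent rejecting a streaming snapshot whose vBucketMap is still empty, consistent with the node still showing "warmup" in the payload. A minimal sketch of where the error tuple comes from, assuming Couchbase 2.0's convention of one CouchDB database per vbucket named "<bucket>/<vbucket-id>" (the shell session is hypothetical):

    %% Hypothetical shell attached to the server VM; couch_db:open_int/2
    %% returns the exact tuple logged above when the .couch file backing
    %% the vbucket database is missing.
    1> couch_db:open_int(<<"default/195">>, []).
    {not_found,no_db_file}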
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@127.0.0.1': ["default"]
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10070.0>:ns_port_server:log:166]
moxi<0.10070.0>: 2012-04-10 18:22:27: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({
[... same JSON payload as the previous moxi entry, except "status": "healthy" in place of "warmup" ...]
moxi<0.10070.0>: })
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 0 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 1 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 2 in "default" on 'ns_1@127.0.0.1' from missing to active.
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 3 in "default" on 'ns_1@127.0.0.1' from missing to active.
[views:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default:
[{active,[]},
 {passive,[]},
 {ignore,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
          26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
          48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,
          70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,
          92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,
          111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
          128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,
          145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
          162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,
          179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,
          196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,
          213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,
          230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,
          247,248,249,250,251,252,253,254,255]},
 {replica,[]}]
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 4 in "default" on 'ns_1@127.0.0.1' from missing to active.
[views:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 5 in "default" on 'ns_1@127.0.0.1' from missing to active.
[... identical ns_janitor:do_sanify_chain entries for vbuckets 6 through 254, all at 18:22:26 ...]
[ns_server:info] [2012-04-10 18:22:26] [ns_1@127.0.0.1:<0.10160.0>:ns_janitor:do_sanify_chain:147] Setting vbucket 255 in "default" on 'ns_1@127.0.0.1' from missing to active.
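The run above is the janitor reconciling engine state with the configured vbucket map: the config (see the buckets section earlier in this diag) puts all 256 vbuckets of "default" on this node, the freshly warmed-up engine reports every one of them as missing, so ns_janitor activates each in turn. Below is a hedged sketch of that decision in plain Erlang; it is not the real ns_janitor:do_sanify_chain code, and the module name, the proplist representation of engine states, and the activate/3 stand-in are all assumptions:

    -module(janitor_sketch).
    -export([sanify/4]).

    %% Map is the configured vbucket map: one chain [Master | Replicas]
    %% per vbucket id, in order. States is a proplist of
    %% {VBucketId, State} pairs as reported by the engine; absent
    %% entries default to 'missing'.
    sanify(Bucket, Node, Map, States) ->
        Indexed = lists:zip(lists:seq(0, length(Map) - 1), Map),
        [activate(Bucket, Node, VB)
         || {VB, [Master | _Replicas]} <- Indexed,
            Master =:= Node,
            proplists:get_value(VB, States, missing) =:= missing].

    %% Stand-in for the SET_VBUCKET_STATE command the janitor sends to
    %% memcached; here it just reproduces the log line.
    activate(Bucket, Node, VB) ->
        io:format("Setting vbucket ~p in ~p on ~p from missing to active.~n",
                  [VB, Bucket, Node]).

With a single-node map, janitor_sketch:sanify("default", 'ns_1@127.0.0.1', lists:duplicate(256, ['ns_1@127.0.0.1', undefined]), []) prints the 256 lines seen above.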
[views:info] [2012-04-10 18:22:30] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
          26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
          48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,
          70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,
          92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,
          111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
          128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,
          145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
          162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,
          179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,
          196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,
          213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,
          230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,
          247,248,249,250,251,252,253,254,255]},
 {passive,[]},
 {ignore,[]},
 {replica,[]}]
[views:info] [2012-04-10 18:22:30] [ns_1@127.0.0.1:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
         26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
         48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,
         70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,
         92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,
         111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
         128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,
         145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
         162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,
         179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,
         196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,
         213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,
         230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,
         247,248,249,250,251,252,253,254,255]
Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[cluster:info] [2012-04-10 18:22:36] [ns_1@127.0.0.1:ns_cluster:ns_cluster:do_change_address:268] Decided to change address to "10.1.2.30"
[user:warn] [2012-04-10 18:22:36] [nonode@nohost:ns_node_disco:ns_node_disco:handle_info:151] Node nonode@nohost saw that node 'ns_1@127.0.0.1' went down.
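Annotation on what follows: the badarg crash reports below are fallout from the address change just logged. Renaming the node from 'ns_1@127.0.0.1' to 'ns_1@10.1.2.30' means restarting net_kernel, and while distribution is down the VM is nonode@nohost (visible in the next few entries). cb_generic_replication_srv still holds the old node name in its server list, and at src/cb_generic_replication_srv.erl line 138 it calls erlang:monitor/2 on {RegName, 'ns_1@127.0.0.1'}. One plausible reading, assuming standard OTP semantics, is that a remote {Name, Node} monitor is rejected with badarg while the local node is not alive; a hypothetical shell session started without -name or -sname shows the shape of the failure:

    %% Node not alive, so a remote monitor target cannot be addressed:
    1> node().
    nonode@nohost
    2> erlang:monitor(process,
                      {'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'}).
    ** exception error: bad argument
         in function  erlang:monitor/2

Once net_kernel is back up under the new name, the monitor can be created but the old node is unreachable, which matches the later "process down: noconnection" warnings.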
[ns_server:info] [2012-04-10 18:22:36] [nonode@nohost:dist_manager:dist_manager:handle_call:136] Adjusted IP to "10.1.2.30"
[ns_server:info] [2012-04-10 18:22:36] [nonode@nohost:dist_manager:dist_manager:bringup:114] Attempting to bring up net_kernel with name 'ns_1@10.1.2.30'
[ns_server:info] [2012-04-10 18:22:36] [nonode@nohost:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: []
[error_logger:error] [2012-04-10 18:22:36] [nonode@nohost:error_logger:ale_error_logger_handler:log_msg:76]
** Generic server 'capi_ddoc_replication_srv-default' terminating
** Last message in was replicate_newnodes_docs
** When Server state == {state,capi_ddoc_replication_srv,
                               {state,"default",<<"default/master">>},
                               'capi_ddoc_replication_srv-default',[],[]}
** Reason for termination ==
** {badarg,
       [{erlang,monitor,
            [process,{'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'}],
            []},
        {cb_generic_replication_srv,'-replicate_newnodes_docs/1-lc$^0/1-0-',2,
            [{file,"src/cb_generic_replication_srv.erl"},{line,138}]},
        {cb_generic_replication_srv,replicate_newnodes_docs,1,
            [{file,"src/cb_generic_replication_srv.erl"},{line,138}]},
        {cb_generic_replication_srv,handle_info,2,
            [{file,"src/cb_generic_replication_srv.erl"},{line,118}]},
        {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,597}]},
        {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]}
[user:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:145] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.30' came up.
[ns_server:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:dist_manager:dist_manager:save_node:81] saving node to "/opt/couchbase/var/lib/couchbase/couchbase-server.node"
[error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================CRASH REPORT=========================
  crasher:
    initial call: cb_generic_replication_srv:init/1
    pid: <0.10190.0>
    registered_name: 'capi_ddoc_replication_srv-default'
    exception exit: {badarg, [... same erlang:monitor/2 stack trace as in the termination message above ...]}
      in function gen_server:terminate/6 (gen_server.erl, line 737)
    ancestors: ['single_bucket_sup-default',<0.10163.0>]
    messages: []
    links: [<0.10191.0>,<0.10203.0>,<0.10164.0>]
    dictionary: []
    trap_exit: false
    status: running
    heap_size: 28657
    stack_size: 24
    reductions: 4024
  neighbours:
[error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,'single_bucket_sup-default'}
     Context:    child_terminated
     Reason:     {badarg, [... same erlang:monitor/2 stack trace as above ...]}
     Offender:   [{pid,<0.10190.0>},
                  {name,{capi_ddoc_replication_srv,"default"}},
                  {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}},
                  {restart_type,permanent},
                  {shutdown,1000},
                  {child_type,worker}]
[ns_server:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:dist_manager:dist_manager:handle_call:140] Re-setting cookie {olcyvmepmlevmwcj,'ns_1@10.1.2.30'}
[ns_server:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:dist_manager:dist_manager:save_address_config:77] saving ip config to "/opt/couchbase/var/lib/couchbase/ip"
[error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76]
** Generic server xdc_rdoc_replication_srv terminating
** Last message in was replicate_newnodes_docs
** When Server state == {state,xdc_rdoc_replication_srv,ok,
                               xdc_rdoc_replication_srv,[],
                               [{<<"_design/_replicator_info">>,{0,<<>>}},
                                {<<"_design/_replicator">>,{0,<<>>}}]}
** Reason for termination ==
** {badarg,
       [{erlang,monitor,
            [process,{xdc_rdoc_replication_srv,'ns_1@127.0.0.1'}],
            []},
        [... remainder identical to the cb_generic_replication_srv stack above ...]]}
[error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================CRASH REPORT=========================
  crasher:
    initial call: cb_generic_replication_srv:init/1
    pid: <0.433.0>
    registered_name: xdc_rdoc_replication_srv
    exception exit: {badarg, [... same erlang:monitor/2 stack trace, against {xdc_rdoc_replication_srv,'ns_1@127.0.0.1'} ...]}
      in function gen_server:terminate/6 (gen_server.erl, line 737)
    ancestors: [ns_server_sup,ns_server_cluster_sup,<0.59.0>]
    messages: []
    links: [<0.322.0>,<0.434.0>]
    dictionary: []
    trap_exit: false
    status: running
    heap_size: 1597
    stack_size: 24
    reductions: 1310
  neighbours:
[error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,ns_server_sup}
     Context:    child_terminated
     Reason:     {badarg, [... same erlang:monitor/2 stack trace, against {xdc_rdoc_replication_srv,'ns_1@127.0.0.1'} ...]}
     Offender:   [{pid,<0.433.0>},
                  {name,xdc_rdoc_replication_srv},
                  {mfargs,{xdc_rdoc_replication_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,1000},
                  {child_type,worker}]
[ns_server:warn] [2012-04-10 18:22:36] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@127.0.0.1'} process down: noconnection
[ns_server:warn] [2012-04-10 18:22:36] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'} process down: noconnection
[error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'single_bucket_sup-default'}
             started: [{pid,<0.12858.0>},
                       {name,{capi_ddoc_replication_srv,"default"}},
                       {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}},
                       {restart_type,permanent},
                       {shutdown,1000},
                       {child_type,worker}]
[error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,net_sup}
             started: [{pid,<0.12862.0>},
                       {name,erl_epmd},
                       {mfargs,{erl_epmd,start_link,[]}},
                       {restart_type,permanent},
                       {shutdown,2000},
                       {child_type,worker}]
[error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76]
** Generic server 'capi_ddoc_replication_srv-default' terminating
** Last message in was replicate_newnodes_docs
** When Server state == {state,capi_ddoc_replication_srv,
                               {state,"default",<<"default/master">>},
                               'capi_ddoc_replication_srv-default',[],[]}
** Reason for termination ==
** {badarg, [... same erlang:monitor/2 stack trace, again against {'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'} ...]}
[error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================CRASH REPORT=========================
  crasher:
    initial call: cb_generic_replication_srv:init/1
    pid: <0.12858.0>
    registered_name: 'capi_ddoc_replication_srv-default'
    exception exit: {badarg, [... same erlang:monitor/2 stack trace as above ...]}
      in function gen_server:terminate/6 (gen_server.erl, line 737)
    ancestors: ['single_bucket_sup-default',<0.10163.0>]
    messages: []
    links: [<0.12859.0>,<0.12861.0>,<0.10164.0>]
dictionary: [] trap_exit: false status: running heap_size: 10946 stack_size: 24 reductions: 1195 neighbours: [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'single_bucket_sup-default'} Context: child_terminated Reason: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,<0.12858.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,net_sup} started: [{pid,<0.12863.0>}, {name,auth}, {mfargs,{auth,start_link,[]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.12860.0>}, {name,xdc_rdoc_replication_srv}, {mfargs,{xdc_rdoc_replication_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76] ** Generic server xdc_rdoc_replication_srv terminating ** Last message in was replicate_newnodes_docs ** When Server state == {state,xdc_rdoc_replication_srv,ok, xdc_rdoc_replication_srv,[], [{<<"_design/_replicator_info">>,{0,<<>>}}, {<<"_design/_replicator">>,{0,<<>>}}]} ** Reason for termination == ** {badarg, [{erlang,monitor, [process,{xdc_rdoc_replication_srv,'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv,'-replicate_newnodes_docs/1-lc$^0/1-0-', 2, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"},{line,118}]}, {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: cb_generic_replication_srv:init/1 pid: <0.12860.0> registered_name: xdc_rdoc_replication_srv exception exit: {badarg, [{erlang,monitor, [process, {xdc_rdoc_replication_srv,'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs, 1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, 
[{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} in function gen_server:terminate/6 (gen_server.erl, line 737) ancestors: [ns_server_sup,ns_server_cluster_sup,<0.59.0>] messages: [] links: [<0.322.0>,<0.12867.0>] dictionary: [] trap_exit: false status: running heap_size: 1597 stack_size: 24 reductions: 1335 neighbours: [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,ns_server_sup} Context: child_terminated Reason: {badarg, [{erlang,monitor, [process, {xdc_rdoc_replication_srv,'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,<0.12860.0>}, {name,xdc_rdoc_replication_srv}, {mfargs,{xdc_rdoc_replication_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.12864.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76] ** Generic server 'capi_ddoc_replication_srv-default' terminating ** Last message in was replicate_newnodes_docs ** When Server state == {state,capi_ddoc_replication_srv, {state,"default",<<"default/master">>}, 'capi_ddoc_replication_srv-default',[],[]} ** Reason for termination == ** {badarg, [{erlang,monitor, [process,{'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv,'-replicate_newnodes_docs/1-lc$^0/1-0-', 2, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"},{line,118}]}, {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: cb_generic_replication_srv:init/1 pid: <0.12864.0> registered_name: 'capi_ddoc_replication_srv-default' exception exit: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs, 1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, 
{cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} in function gen_server:terminate/6 (gen_server.erl, line 737) ancestors: ['single_bucket_sup-default',<0.10163.0>] messages: [] links: [<0.12866.0>,<0.12868.0>,<0.10164.0>] dictionary: [] trap_exit: false status: running heap_size: 10946 stack_size: 24 reductions: 1171 neighbours: [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'single_bucket_sup-default'} Context: child_terminated Reason: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,<0.12864.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.12870.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76] ** Generic server 'capi_ddoc_replication_srv-default' terminating ** Last message in was replicate_newnodes_docs ** When Server state == {state,capi_ddoc_replication_srv, {state,"default",<<"default/master">>}, 'capi_ddoc_replication_srv-default',[],[]} ** Reason for termination == ** {badarg, [{erlang,monitor, [process,{'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv,'-replicate_newnodes_docs/1-lc$^0/1-0-', 2, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"},{line,118}]}, {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: cb_generic_replication_srv:init/1 pid: <0.12870.0> registered_name: 'capi_ddoc_replication_srv-default' exception exit: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, 
{cb_generic_replication_srv,replicate_newnodes_docs, 1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} in function gen_server:terminate/6 (gen_server.erl, line 737) ancestors: ['single_bucket_sup-default',<0.10163.0>] messages: [] links: [<0.12871.0>,<0.12872.0>,<0.10164.0>] dictionary: [] trap_exit: false status: running heap_size: 10946 stack_size: 24 reductions: 1247 neighbours: [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'single_bucket_sup-default'} Context: child_terminated Reason: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,<0.12870.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.12873.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76] ** Generic server 'capi_ddoc_replication_srv-default' terminating ** Last message in was replicate_newnodes_docs ** When Server state == {state,capi_ddoc_replication_srv, {state,"default",<<"default/master">>}, 'capi_ddoc_replication_srv-default',[],[]} ** Reason for termination == ** {badarg, [{erlang,monitor, [process,{'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv,'-replicate_newnodes_docs/1-lc$^0/1-0-', 2, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"},{line,118}]}, {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: cb_generic_replication_srv:init/1 pid: <0.12873.0> registered_name: 'capi_ddoc_replication_srv-default' exception exit: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, 
'-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs, 1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} in function gen_server:terminate/6 (gen_server.erl, line 737) ancestors: ['single_bucket_sup-default',<0.10163.0>] messages: [] links: [<0.12874.0>,<0.12875.0>,<0.10164.0>] dictionary: [] trap_exit: false status: running heap_size: 10946 stack_size: 24 reductions: 1263 neighbours: [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'single_bucket_sup-default'} Context: child_terminated Reason: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,<0.12873.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.12876.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76] ** Generic server 'capi_ddoc_replication_srv-default' terminating ** Last message in was replicate_newnodes_docs ** When Server state == {state,capi_ddoc_replication_srv, {state,"default",<<"default/master">>}, 'capi_ddoc_replication_srv-default',[],[]} ** Reason for termination == ** {badarg, [{erlang,monitor, [process,{'capi_ddoc_replication_srv-default','ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv,'-replicate_newnodes_docs/1-lc$^0/1-0-', 2, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"},{line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"},{line,118}]}, {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: cb_generic_replication_srv:init/1 pid: <0.12876.0> registered_name: 'capi_ddoc_replication_srv-default' exception exit: {badarg, [{erlang,monitor, [process, 
{'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs, 1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} in function gen_server:terminate/6 (gen_server.erl, line 737) ancestors: ['single_bucket_sup-default',<0.10163.0>] messages: [] links: [<0.12877.0>,<0.12878.0>,<0.10164.0>] dictionary: [] trap_exit: false status: running heap_size: 10946 stack_size: 24 reductions: 1279 neighbours: [error_logger:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'single_bucket_sup-default'} Context: child_terminated Reason: {badarg, [{erlang,monitor, [process, {'capi_ddoc_replication_srv-default', 'ns_1@127.0.0.1'}], []}, {cb_generic_replication_srv, '-replicate_newnodes_docs/1-lc$^0/1-0-',2, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,replicate_newnodes_docs,1, [{file,"src/cb_generic_replication_srv.erl"}, {line,138}]}, {cb_generic_replication_srv,handle_info,2, [{file,"src/cb_generic_replication_srv.erl"}, {line,118}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,597}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,<0.12876.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs,{capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: ['ns_1@10.1.2.30'] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'single_bucket_sup-default'} started: [{pid,<0.12879.0>}, {name,{capi_ddoc_replication_srv,"default"}}, {mfargs, {capi_ddoc_replication_srv,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,net_sup} started: [{pid,<0.12865.0>}, {name,net_kernel}, {mfargs, {net_kernel,start_link, [['ns_1@10.1.2.30',longnames]]}}, {restart_type,permanent}, {shutdown,2000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,kernel_sup} started: [{pid,<0.12857.0>}, {name,net_sup_dynamic}, {mfargs, {erl_distribution,start_link, [['ns_1@10.1.2.30',longnames]]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.12869.0>}, {name,xdc_rdoc_replication_srv}, 
{mfargs,{xdc_rdoc_replication_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================INFO REPORT========================= application: mnesia exited: stopped type: temporary [ns_server:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:mb_mnesia:mb_mnesia:init:249] Found backup. Restoring Mnesia database. [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_sup} started: [{pid,<0.12919.0>}, {name,mnesia_event}, {mfargs,{mnesia_sup,start_event,[]}}, {restart_type,permanent}, {shutdown,30000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12921.0>}, {name,mnesia_monitor}, {mfargs,{mnesia_monitor,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12922.0>}, {name,mnesia_subscr}, {mfargs,{mnesia_subscr,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12923.0>}, {name,mnesia_locker}, {mfargs,{mnesia_locker,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12924.0>}, {name,mnesia_recover}, {mfargs,{mnesia_recover,start,[]}}, {restart_type,permanent}, {shutdown,180000}, {child_type,worker}] [stats:error] [2012-04-10 18:22:36] [ns_1@10.1.2.30:<0.3685.0>:stats_reader:log_bad_responses:185] Bad replies: [{'ns_1@10.1.2.30', {error, {exit, {aborted, {no_exists, ['stats_archiver-default-minute']}}}}}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12925.0>}, {name,mnesia_tm}, {mfargs,{mnesia_tm,start,[]}}, {restart_type,permanent}, {shutdown,30000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12994.0>}, {name,mnesia_checkpoint_sup}, {mfargs,{mnesia_checkpoint_sup,start,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12995.0>}, {name,mnesia_snmp_sup}, {mfargs,{mnesia_snmp_sup,start,[]}}, 
{restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12996.0>}, {name,mnesia_controller}, {mfargs,{mnesia_controller,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_kernel_sup} started: [{pid,<0.12997.0>}, {name,mnesia_late_loader}, {mfargs,{mnesia_late_loader,start,[]}}, {restart_type,permanent}, {shutdown,3000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,mnesia_sup} started: [{pid,<0.12920.0>}, {name,mnesia_kernel_sup}, {mfargs,{mnesia_kernel_sup,start,[]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}] [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= application: mnesia started_at: 'ns_1@10.1.2.30' [ns_server:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:mb_mnesia:mb_mnesia:handle_info:186] Saw Mnesia go down on 'ns_1@10.1.2.30' [cluster:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:ns_cluster:ns_cluster:do_change_address:274] Renamed node. New name is 'ns_1@10.1.2.30'. [error_logger:info] [2012-04-10 18:22:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.13010.0>}, {name,ns_doctor}, {mfargs,{ns_doctor,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [menelaus:warn] [2012-04-10 18:22:37] [ns_1@10.1.2.30:<0.3685.0>:menelaus_web:loop:291] Client-side error-report for user "Administrator" on node 'ns_1@10.1.2.30': User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22 Got unhandled error: TypeError: 'undefined' is not an object (evaluating 'storage.ram.quotaTotal') At: http://10.1.2.30:8091/js/settings.js:380 Backtrace: Function: collectBacktraceViaCaller Args: --------- Function: appOnError Args: "TypeError: 'undefined' is not an object (evaluating 'storage.ram.quotaTotal')" "http://10.1.2.30:8091/js/settings.js" 380 --------- [error_logger:error] [2012-04-10 18:22:41] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76] ** Generic server ns_doctor terminating ** Last message in was {get_node,'ns_1@10.1.2.30'} ** When Server state == {state,{dict,0,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[],[], [],[],[]}, {{[],[],[],[],[],[],[],[],[],[],[],[],[], [],[],[]}}}, undefined,undefined,undefined} ** Reason for termination == ** {badarg,[{dict,fetch, ['ns_1@10.1.2.30', {dict,0,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]}, {{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]}}}], [{file,"dict.erl"},{line,125}]}, {ns_doctor,handle_call,3,[{file,"src/ns_doctor.erl"},{line,92}]}, {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,578}]}, {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} [ns_doctor:error] 
[2012-04-10 18:22:41] [ns_1@10.1.2.30:ns_cluster:ns_doctor:get_node:164] Error attempting to get node 'ns_1@10.1.2.30': {exit, {{badarg, [{dict,fetch, ['ns_1@10.1.2.30', {dict,0,16,16,8,80,48, {[],[],[],[],[],[],[], [],[],[],[],[],[],[], [],[]}, {{[],[],[],[],[],[],[], [],[],[],[],[],[],[], [],[]}}}], [{file,"dict.erl"}, {line,125}]}, {ns_doctor,handle_call,3, [{file, "src/ns_doctor.erl"}, {line,92}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"}, {line,578}]}, {proc_lib,init_p_do_apply, 3, [{file,"proc_lib.erl"}, {line,227}]}]}, {gen_server,call, [ns_doctor, {get_node, 'ns_1@10.1.2.30'}]}}} [error_logger:error] [2012-04-10 18:22:41] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_doctor:init/1 pid: <0.13010.0> registered_name: ns_doctor exception exit: {badarg, [{dict,fetch, ['ns_1@10.1.2.30', {dict,0,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[],[],[], [],[]}, {{[],[],[],[],[],[],[],[],[],[],[],[],[],[], [],[]}}}], [{file,"dict.erl"},{line,125}]}, {ns_doctor,handle_call,3, [{file,"src/ns_doctor.erl"},{line,92}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,578}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} in function gen_server:terminate/6 (gen_server.erl, line 737) ancestors: [ns_server_sup,ns_server_cluster_sup,<0.59.0>] messages: [] links: [<0.322.0>,<0.13011.0>,<0.56.0>] dictionary: [] trap_exit: false status: running heap_size: 610 stack_size: 24 reductions: 1356 neighbours: [error_logger:error] [2012-04-10 18:22:41] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,ns_server_sup} Context: child_terminated Reason: {badarg, [{dict,fetch, ['ns_1@10.1.2.30', {dict,0,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[],[],[],[], []}, {{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[], []}}}], [{file,"dict.erl"},{line,125}]}, {ns_doctor,handle_call,3, [{file,"src/ns_doctor.erl"},{line,92}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,578}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,<0.13010.0>}, {name,ns_doctor}, {mfargs,{ns_doctor,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:22:41] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_server_sup} started: [{pid,<0.13096.0>}, {name,ns_doctor}, {mfargs,{ns_doctor,start_link,[]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}] [ns_doctor:error] [2012-04-10 18:22:46] [ns_1@10.1.2.30:ns_heart:ns_doctor:get_nodes:155] Error attempting to get nodes: {exit, {timeout, {gen_server,call,[ns_doctor,get_nodes]}}} [ns_server:info] [2012-04-10 18:22:46] [ns_1@10.1.2.30:cb_replication:cb_replication:handle_call:155] Switching replication mode for default from new to compat [ns_server:info] [2012-04-10 18:22:46] [ns_1@10.1.2.30:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@10.1.2.30': ["default"] [cluster:info] [2012-04-10 18:22:47] [ns_1@10.1.2.30:ns_cluster:ns_cluster:node_add_transaction:484] Started node add transaction by adding node 'ns_1@10.1.2.31' to nodes_wanted [user:info] [2012-04-10 18:22:47] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:145] Node 'ns_1@10.1.2.30' saw that node 
'ns_1@10.1.2.31' came up.
[ns_server:info] [2012-04-10 18:22:47] [ns_1@10.1.2.30:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: ['ns_1@10.1.2.30','ns_1@10.1.2.31']
[ns_server:warn] [2012-04-10 18:22:47] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.31'} process down: noproc
[cluster:info] [2012-04-10 18:22:48] [ns_1@10.1.2.30:ns_cluster:ns_cluster:node_add_transaction:484] Started node add transaction by adding node 'ns_1@10.1.2.32' to nodes_wanted
[user:info] [2012-04-10 18:22:48] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:145] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.32' came up.
[ns_server:info] [2012-04-10 18:22:48] [ns_1@10.1.2.30:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: ['ns_1@10.1.2.30','ns_1@10.1.2.31','ns_1@10.1.2.32']
[ns_server:warn] [2012-04-10 18:22:48] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.32'} process down: noproc
[cluster:info] [2012-04-10 18:22:48] [ns_1@10.1.2.30:ns_cluster:ns_cluster:node_add_transaction:484] Started node add transaction by adding node 'ns_1@10.1.2.33' to nodes_wanted
[user:info] [2012-04-10 18:22:48] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:145] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.33' came up.
[ns_server:info] [2012-04-10 18:22:48] [ns_1@10.1.2.30:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: ['ns_1@10.1.2.30','ns_1@10.1.2.31','ns_1@10.1.2.32','ns_1@10.1.2.33']
[ns_server:warn] [2012-04-10 18:22:48] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.33'} process down: noproc
[cluster:info] [2012-04-10 18:22:48] [ns_1@10.1.2.30:ns_cluster:ns_cluster:node_add_transaction:484] Started node add transaction by adding node 'ns_1@10.1.2.34' to nodes_wanted
[user:info] [2012-04-10 18:22:49] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:145] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.34' came up.
[ns_server:info] [2012-04-10 18:22:49] [ns_1@10.1.2.30:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: ['ns_1@10.1.2.30','ns_1@10.1.2.31','ns_1@10.1.2.32','ns_1@10.1.2.33','ns_1@10.1.2.34']
[ns_server:warn] [2012-04-10 18:22:49] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.34'} process down: noproc
[cluster:info] [2012-04-10 18:22:49] [ns_1@10.1.2.30:ns_cluster:ns_cluster:node_add_transaction:484] Started node add transaction by adding node 'ns_1@10.1.2.35' to nodes_wanted
[user:info] [2012-04-10 18:22:50] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:145] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.35' came up.
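Note: the five added nodes are now coming up and the orchestrator is about to start a rebalance (below). In the bucket config that follows, the map has one entry per vbucket (num_vbuckets = 256), each a chain of the form [ActiveNode, ReplicaNode]; with num_replicas = 1 there are two positions per chain, and undefined marks a replica that has not been assigned yet, which is expected before the first rebalance spreads the bucket beyond 'ns_1@10.1.2.30'. The burst of "process down: noproc" warnings means the ddoc replicator monitored capi_ddoc_replication_srv-default on the new nodes before that server was registered there, so each monitor fired an immediate DOWN with reason noproc. A small sketch for tallying ownership in such a map, assuming only the [Active, Replica] shape seen in this log (count_owners/1 is illustrative, not Couchbase code):

%% Sketch: count active/replica vbucket ownership per node from a map of
%% [ActiveNode, ReplicaNode] chains; 'undefined' replicas are skipped.
count_owners(Map) ->
    lists:foldl(
      fun([Active, Replica], Acc0) ->
              Acc1 = orddict:update_counter({active, Active}, 1, Acc0),
              case Replica of
                  undefined -> Acc1;
                  _         -> orddict:update_counter({replica, Replica}, 1, Acc1)
              end
      end,
      orddict:new(),
      Map).

Applied to the pre-rebalance map below, this would report 256 active vbuckets on 'ns_1@10.1.2.30' and no replicas; applied to the target map logged afterwards, it would show the chains spread across all six nodes.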
[ns_server:info] [2012-04-10 18:22:50] [ns_1@10.1.2.30:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: ['ns_1@10.1.2.30','ns_1@10.1.2.31', 'ns_1@10.1.2.32','ns_1@10.1.2.33', 'ns_1@10.1.2.34','ns_1@10.1.2.35'] [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.35'} process down: noproc [user:info] [2012-04-10 18:22:50] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:idle:371] Starting rebalance, KeepNodes = ['ns_1@10.1.2.32','ns_1@10.1.2.34', 'ns_1@10.1.2.30','ns_1@10.1.2.31', 'ns_1@10.1.2.35','ns_1@10.1.2.33'], EjectNodes = [] [rebalance:info] [2012-04-10 18:22:50] [ns_1@10.1.2.30:<0.13296.0>:ns_rebalancer:rebalance:115] Rebalancing bucket "default" with config [{sasl_password,[]}, {num_replicas,1}, {replica_index,true}, {ram_quota,1435500544}, {auth_type,sasl}, {type,membase}, {num_vbuckets,256}, {servers,['ns_1@10.1.2.30']}, {map, [['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30',undefined]]}] [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] 
[ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc [ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote 
server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.34'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.32'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.31'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.33'} process down: noproc
[ns_server:warn] [2012-04-10 18:22:50] [ns_1@10.1.2.30:'capi_ddoc_replication_srv-default':cb_generic_replication_srv:handle_info:114] Remote server node {'capi_ddoc_replication_srv-default','ns_1@10.1.2.35'} process down: noproc
[stats:error] [2012-04-10 18:22:50] [ns_1@10.1.2.30:<0.10155.0>:stats_reader:log_bad_responses:185] Bad replies:
[{'ns_1@10.1.2.32', {error, {exit, {aborted, {no_exists, ['stats_archiver-default-minute']}}}}}] [stats:error] [2012-04-10 18:22:50] [ns_1@10.1.2.30:<0.10155.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.35'] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@10.1.2.31': ["default"] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@10.1.2.34': ["default"] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@10.1.2.33': ["default"] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@10.1.2.35': ["default"] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:ns_doctor:ns_doctor:update_status:209] The following buckets became ready on node 'ns_1@10.1.2.32': ["default"] [rebalance:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13296.0>:ns_rebalancer:wait_for_memcached:276] Waiting for ['ns_1@10.1.2.32','ns_1@10.1.2.34','ns_1@10.1.2.31', 'ns_1@10.1.2.35','ns_1@10.1.2.33'] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:cb_replication:cb_replication:handle_call:155] Switching replication mode for default from compat to new [rebalance:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13296.0>:ns_rebalancer:rebalance:167] Target map (distance: {213,213,213}): [['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.31'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.32'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.33'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.34'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.30','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.30'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], 
['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.32'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.33'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.34'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.31','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.30'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.31'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.33'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.34'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.32','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.30'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.31'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], 
['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.32'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.34'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.33','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.30'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.31'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.32'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.33'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.34','ns_1@10.1.2.35'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.30'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.31'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.32'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], 
['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.33'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34'], ['ns_1@10.1.2.35','ns_1@10.1.2.34']] [rebalance:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13357.0>:ns_vbucket_mover:init:83] The following count of vbuckets do not need to be moved at all: 0 [rebalance:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13357.0>:ns_vbucket_mover:init:84] The following moves are planned: [{'ns_1@10.1.2.30',[{0, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {1, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {2, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {3, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {4, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {5, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {6, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {7, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {8, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.31']}, {9, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {10, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {11, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {12, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {13, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {14, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {15, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {16, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {17, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.32']}, {18, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {19, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {20, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {21, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {22, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {23, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {24, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {25, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.33']}, {26, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {27, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {28, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {29, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {30, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {31, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {32, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {33, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {34, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.34']}, {35, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {36, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {37, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {38, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {39, 
['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {40, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {41, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {42, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.30','ns_1@10.1.2.35']}, {43, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {44, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {45, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {46, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {47, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {48, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {49, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {50, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {51, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.30']}, {52, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {53, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {54, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {55, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {56, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {57, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {58, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {59, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.32']}, {60, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {61, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {62, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {63, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {64, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {65, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {66, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {67, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {68, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.33']}, {69, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {70, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {71, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {72, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {73, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {74, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {75, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {76, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.34']}, {77, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {78, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {79, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {80, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {81, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {82, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {83, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {84, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {85, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.31','ns_1@10.1.2.35']}, {86, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {87, ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {88, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {89, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {90, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {91, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {92, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {93, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {94, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.30']}, {95, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {96, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {97, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {98, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {99, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {100, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {101, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {102, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.31']}, {103, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {104, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {105, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {106, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {107, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {108, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {109, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {110, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {111, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.33']}, {112, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {113, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {114, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {115, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {116, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {117, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {118, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {119, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {120, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.34']}, {121, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {122, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {123, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {124, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {125, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {126, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {127, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {128, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.32','ns_1@10.1.2.35']}, {129, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {130, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {131, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {132, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {133, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {134, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {135, ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {136, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.30']}, {137, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {138, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {139, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {140, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {141, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {142, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {143, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {144, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {145, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.31']}, {146, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {147, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {148, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {149, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {150, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {151, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {152, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {153, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {154, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.32']}, {155, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {156, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {157, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {158, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {159, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {160, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {161, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {162, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.34']}, {163, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {164, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {165, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {166, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {167, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {168, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {169, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {170, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {171, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.33','ns_1@10.1.2.35']}, {172, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {173, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {174, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {175, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {176, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {177, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {178, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {179, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {180, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.30']}, {181, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {182, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {183, ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {184, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {185, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {186, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {187, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {188, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.31']}, {189, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {190, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {191, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {192, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {193, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {194, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {195, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {196, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.32']}, {197, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {198, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {199, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {200, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {201, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {202, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {203, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {204, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.33']}, {205, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {206, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {207, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {208, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {209, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {210, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {211, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {212, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {213, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.34','ns_1@10.1.2.35']}, {214, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {215, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {216, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {217, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {218, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {219, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {220, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {221, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.30']}, {222, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {223, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {224, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {225, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {226, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {227, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {228, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {229, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {230, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.31']}, {231, ['ns_1@10.1.2.30',undefined], 
['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {232, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {233, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {234, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {235, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {236, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {237, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {238, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.32']}, {239, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {240, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {241, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {242, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {243, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {244, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {245, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {246, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.33']}, {247, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {248, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {249, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {250, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {251, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {252, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {253, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {254, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}, {255, ['ns_1@10.1.2.30',undefined], ['ns_1@10.1.2.35','ns_1@10.1.2.34']}]}] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13369.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_0_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13369.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13367.0>,shutdown} [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',0}] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13389.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0]) [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13395.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_1_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13395.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13394.0>,shutdown} [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',1}] [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13418.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:51] [ns_1@10.1.2.30:<0.13418.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1]) [ns_server:info] [2012-04-10 18:22:51] 
[ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_0_'ns_1@10.1.2.31' - 0, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_1_'ns_1@10.1.2.31' - 1, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 0, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13424.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_2_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13424.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13423.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',2}]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13448.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0, 1], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13448.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1, 2])
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13454.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_3_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13454.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13453.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',3}]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13477.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0, 1, 2], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13477.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1, 2, 3])
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 1, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_2_'ns_1@10.1.2.31' - 2, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 2, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_3_'ns_1@10.1.2.31' - 3, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13483.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_4_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13483.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13482.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',4}]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13505.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0, 1, 2, 3], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13505.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1, 2, 3, 4])
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13511.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_5_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13511.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13510.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',5}]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13534.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0, 1, 2, 3, 4], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13534.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1, 2, 3, 4, 5])
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_4_'ns_1@10.1.2.31' - 4, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 3, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_5_'ns_1@10.1.2.31' - 5, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 4, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13540.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_6_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13540.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13539.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',6}]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13564.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0, 1, 2, 3, 4, 5], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13564.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1, 2, 3, 4, 5, 6])
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13570.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_7_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13570.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13569.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',7}]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13593.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0, 1, 2, 3, 4, 5, 6], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13593.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1, 2, 3, 4, 5, 6, 7])
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state rebalancing: {rebalancing_state,<0.13296.0>, {dict,6,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[], [],[],[],[]}, {{[['ns_1@10.1.2.30'| 0.05351170568561869]], [['ns_1@10.1.2.31'|0.0]], [['ns_1@10.1.2.32'|0.0]], [['ns_1@10.1.2.33'|0.0]], [['ns_1@10.1.2.34'|0.0]], [['ns_1@10.1.2.35'|0.0]], [],[],[],[],[],[],[],[],[],[]}}}}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 5, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_6_'ns_1@10.1.2.31' - 6, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 6, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_7_'ns_1@10.1.2.31' - 7, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13599.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_8_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13599.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13598.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.31',8}]
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13629.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', {new_child_id, [0, 1, 2, 3, 4, 5, 6, 7], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:<0.13629.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.31', [0, 1, 2, 3, 4, 5, 6, 7, 8])
[ns_server:info] [2012-04-10 18:22:52] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_8_'ns_1@10.1.2.31' - 8, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 7, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.31 - 8, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_9_'ns_1@10.1.2.32' - 9, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13635.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_9_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13635.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13634.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',9}]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13668.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', "\t")
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13673.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_10_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13673.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13672.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',10}]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13697.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, "\t", 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13697.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', "\t\n")
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 9, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_10_'ns_1@10.1.2.32' - 10, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13702.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_11_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13702.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13701.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',11}]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13726.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, "\t\n", 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13726.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', "\t\n\v")
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 10, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_11_'ns_1@10.1.2.32' - 11, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 11, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_12_'ns_1@10.1.2.32' - 12, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13731.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_12_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13731.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13730.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',12}]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13755.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, "\t\n\v", 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13755.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', "\t\n\v\f")
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13760.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_13_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13760.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13759.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',13}]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13784.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, "\t\n\v\f", 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13784.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', "\t\n\v\f\r")
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 12, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_13_'ns_1@10.1.2.32' - 13, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13789.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_14_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13789.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13788.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',14}]
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13813.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, "\t\n\v\f\r", 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13813.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', [9, 10, 11, 12, 13, 14])
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13818.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_15_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13818.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13817.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',15}]
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13842.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, [9, 10, 11, 12, 13, 14], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13842.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', [9, 10, 11, 12, 13, 14, 15])
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_14_'ns_1@10.1.2.32' - 14, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 13, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 14, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_15_'ns_1@10.1.2.32' - 15, Backfill is completed with VBuckets
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13847.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_16_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13847.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13846.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',16}]
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13871.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, [9, 10, 11, 12, 13, 14, 15], 'ns_1@10.1.2.30'})
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13871.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', [9, 10, 11, 12, 13, 14, 15, 16])
[ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_16_'ns_1@10.1.2.32' - 16, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 15, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_17_'ns_1@10.1.2.32' - 17, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages...
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 16, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13876.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_17_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13876.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13875.0>,shutdown} [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.32',17}] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13900.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', {new_child_id, [9, 10, 11, 12, 13, 14, 15, 16], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13900.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.32', [9, 10, 11, 12, 13, 14, 15, 16, 17]) [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13905.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_18_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13905.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13904.0>,shutdown} [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',18}] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13931.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18]) [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13935.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_19_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13935.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13934.0>,shutdown} [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',19}] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13967.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', {new_child_id, [18], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13967.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18, 19]) [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_18_'ns_1@10.1.2.33' - 18, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.32 - 17, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 18, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_19_'ns_1@10.1.2.33' - 19, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13971.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_20_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13971.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13970.0>,shutdown} [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',20}] [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13996.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', {new_child_id, [18, 19], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:54] [ns_1@10.1.2.30:<0.13996.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18, 19, 20]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14000.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_21_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14000.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.13999.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',21}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14026.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', {new_child_id, [18, 19, 20], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14026.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18, 19, 20, 21]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 19, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_20_'ns_1@10.1.2.33' - 20, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 20, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_21_'ns_1@10.1.2.33' - 21, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14030.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_22_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14030.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14029.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',22}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14055.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', {new_child_id, [18, 19, 20, 21], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14055.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18, 19, 20, 21, 22]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14059.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_23_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14059.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14058.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',23}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14084.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', {new_child_id, [18, 19, 20, 21, 22], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14084.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18, 19, 20, 21, 22, 23]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 21, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_22_'ns_1@10.1.2.33' - 22, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_23_'ns_1@10.1.2.33' - 23, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 22, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14088.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_24_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14088.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14087.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',24}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14113.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', {new_child_id, [18, 19, 20, 21, 22, 23], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14113.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18, 19, 20, 21, 22, 23, 24]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14117.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_25_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14117.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14116.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.33',25}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14142.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', {new_child_id, [18, 19, 20, 21, 22, 23, 24], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14142.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.33', [18, 19, 20, 21, 22, 23, 24, 25]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_24_'ns_1@10.1.2.33' - 24, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 23, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_25_'ns_1@10.1.2.33' - 25, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 24, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14146.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_26_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14146.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14145.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',26}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14172.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14175.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_27_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14175.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14174.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',27}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14200.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14200.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27]) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_26_'ns_1@10.1.2.34' - 26, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.33 - 25, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 26, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_27_'ns_1@10.1.2.34' - 27, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14203.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_28_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14203.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14202.0>,shutdown} [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',28}] [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14229.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26, 27], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:55] [ns_1@10.1.2.30:<0.14229.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27, 28]) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14232.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_29_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14232.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14231.0>,shutdown} [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',29}] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14258.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26, 27, 28], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14258.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27, 28, 29]) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 27, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_28_'ns_1@10.1.2.34' - 28, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_29_'ns_1@10.1.2.34' - 29, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 28, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14261.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_30_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14261.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14260.0>,shutdown} [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',30}] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14287.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26, 27, 28, 29], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14287.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27, 28, 29, 30]) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14290.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_31_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14290.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14289.0>,shutdown} [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:ns_config_rep:ns_config_rep:do_pull:258] Pulling config from: 'ns_1@10.1.2.34' [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',31}] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14318.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26, 27, 28, 29, 30], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14318.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27, 28, 29, 30, 31]) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_30_'ns_1@10.1.2.34' - 30, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 29, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 30, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_31_'ns_1@10.1.2.34' - 31, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14321.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_32_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14321.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14320.0>,shutdown} [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',32}] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14348.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26, 27, 28, 29, 30, 31], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14348.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27, 28, 29, 30, 31, 32]) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14351.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_33_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14351.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14350.0>,shutdown} [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',33}] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14384.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26, 27, 28, 29, 30, 31, 32], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14384.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27, 28, 29, 30, 31, 32, 33]) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 31, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_32_'ns_1@10.1.2.34' - 32, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_33_'ns_1@10.1.2.34' - 33, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 32, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14387.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_34_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14387.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14386.0>,shutdown} [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.34',34}] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14413.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', {new_child_id, [26, 27, 28, 29, 30, 31, 32, 33], 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14413.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.34', [26, 27, 28, 29, 30, 31, 32, 33, 34]) [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14416.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_35_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14416.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14415.0>,shutdown} [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',35}] [ns_server:info] [2012-04-10 18:22:56] [ns_1@10.1.2.30:<0.14443.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#") [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 33, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_34_'ns_1@10.1.2.34' - 34, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.34 - 34, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_35_'ns_1@10.1.2.35' - 35, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14445.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_36_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14445.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14444.0>,shutdown} [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',36}] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14472.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', {new_child_id, "#", 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14472.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#$") [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14474.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_37_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14474.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14473.0>,shutdown} [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',37}] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14501.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', {new_child_id, "#$", 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14501.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#$%") [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 35, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_36_'ns_1@10.1.2.35' - 36, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 36, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_37_'ns_1@10.1.2.35' - 37, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14503.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_38_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14503.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14502.0>,shutdown} [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',38}] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14530.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', {new_child_id, "#$%", 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14530.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#$%&") [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14532.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_39_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14532.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14531.0>,shutdown} [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',39}] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14559.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', {new_child_id, "#$%&", 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14559.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#$%&'") [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 37, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_38_'ns_1@10.1.2.35' - 38, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 38, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_39_'ns_1@10.1.2.35' - 39, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14561.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_40_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14561.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14560.0>,shutdown} [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',40}] [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14588.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', {new_child_id, "#$%&'", 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:<0.14588.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#$%&'(") [ns_server:info] [2012-04-10 18:22:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 39, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_40_'ns_1@10.1.2.35' - 40, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14590.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_41_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14590.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14589.0>,shutdown} [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',41}] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14628.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', {new_child_id, "#$%&'(", 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14628.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#$%&'()") [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 40, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_41_'ns_1@10.1.2.35' - 41, fill is completed with VBuckets [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14630.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_42_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14630.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14629.0>,shutdown} [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.30','ns_1@10.1.2.35',42}] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14657.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', {new_child_id, "#$%&'()", 'ns_1@10.1.2.30'}) [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14657.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.30', 'ns_1@10.1.2.35', "#$%&'()*") [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_42_'ns_1@10.1.2.35' - 42, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - Connection is re-established. Rollback unacked messages... memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 41, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_43_'ns_1@10.1.2.31' - 43, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - Connection is re-established. Rollback unacked messages... 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_ns_1@10.1.2.35 - 42, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14670.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+"},{checkpoints,[{43,0}]},{name,"rebalance_43"},{takeover,true}] [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14670.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 43 [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14670.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14659.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_43_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14659.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14658.0>,shutdown} [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',43}] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14683.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+") [error_logger:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.14689.0>}, {name,{new_child_id,"+",'ns_1@10.1.2.31'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.31",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"+"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14689.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+"}, {checkpoints,[{43,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14689.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 43 [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_43 - 43, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_43 - VBucket <43> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
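The handoff of vbucket 43 just above is the template for every vbucket that follows: a short-lived "rebalance_43" TAP stream is opened with {takeover,true}, memcached fills it, flips the vbucket dead, and disconnects, after which a non-takeover "replication_ns_1@10.1.2.30" stream is restarted from the new master. Written out as the option list ebucketmigrator_srv logs at init, with the vbucket string decoded back into the integer list it stands for (a reading aid only, not an extra log record):

    %% Takeover stream options as logged above; "+" is the list [43].
    Opts = [{vbuckets,[43]},
            {checkpoints,[{43,0}]},
            {name,"rebalance_43"},
            {takeover,true}].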
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_44_'ns_1@10.1.2.31' - 44, fill is completed with VBuckets
[rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14693.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,","},{checkpoints,[{44,0}]},{name,"rebalance_44"},{takeover,true}]
[rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14693.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 44
[rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14693.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14691.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_44_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14691.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14690.0>,shutdown}
[ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',44}]
[ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14707.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', {new_child_id, "+", 'ns_1@10.1.2.31'})
[rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14689.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14707.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,")
[error_logger:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_vbm_new_sup-default'}
             started: [{pid,<0.14713.0>},
                       {name,{new_child_id,"+,",'ns_1@10.1.2.31'}},
                       {mfargs,
                           {ebucketmigrator_srv,start_link,
                               [{"10.1.2.31",11209},
                                {"10.1.2.30",11209},
                                [{username,"default"},
                                 {password,[]},
                                 {vbuckets,"+,"},
                                 {takeover,false},
                                 {suffix,"ns_1@10.1.2.30"}]]}},
                       {restart_type,permanent},
                       {shutdown,60000},
                       {child_type,worker}]
[rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14713.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+,"}, {checkpoints,[{43,0},{44,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14713.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 44
[ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_44 - 44, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_44 - VBucket <44> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream
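Each add_replica above drives the same supervisor cycle: the running ebucketmigrator child is killed and restarted under an id that carries the grown vbucket list, which is why the child ids progress from "+" to "+," to "+,-" and onward. A rough sketch of that cycle against a standard OTP supervisor (hypothetical helper name and child spec; the real logic lives in cb_gen_vbm_sup:set_node_replicas, whose source is not part of this log):

    %% Hypothetical sketch: restart a replicator child with one more vbucket.
    %% Ids mirror the {new_child_id, VBuckets, Node} tuples seen in the log.
    grow_replica(Sup, {new_child_id, VBs, Node} = OldId, VB, StartMFA) ->
        ok = supervisor:terminate_child(Sup, OldId),   % kill_child(...)
        ok = supervisor:delete_child(Sup, OldId),
        NewId = {new_child_id, lists:usort([VB | VBs]), Node},
        supervisor:start_child(Sup,                    % start_child(...)
            {NewId, StartMFA, permanent, 60000, worker, [ebucketmigrator_srv]}).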
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_45_'ns_1@10.1.2.31' - 45, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14724.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"-"},{checkpoints,[{45,0}]},{name,"rebalance_45"},{takeover,true}] [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14724.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 45 [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14724.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14715.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_45_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14715.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14714.0>,shutdown} [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',45}] [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14737.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', {new_child_id, "+,", 'ns_1@10.1.2.31'}) [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14713.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14737.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,-") [error_logger:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.14743.0>}, {name,{new_child_id,"+,-",'ns_1@10.1.2.31'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.31",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"+,-"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14743.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+,-"}, {checkpoints,[{43,0},{44,0},{45,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:<0.14743.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 45 [ns_server:info] [2012-04-10 18:22:58] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_45 - 45, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_45 - VBucket <45> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_46_'ns_1@10.1.2.31' - 46, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14747.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"."},{checkpoints,[{46,0}]},{name,"rebalance_46"},{takeover,true}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14747.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 46 [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14747.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14745.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_46_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14745.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14744.0>,shutdown} [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',46}] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14760.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', {new_child_id, "+,-", 'ns_1@10.1.2.31'}) [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14743.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14760.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,-.") [error_logger:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.14766.0>}, {name,{new_child_id,"+,-.",'ns_1@10.1.2.31'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.31",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"+,-."}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14766.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+,-."}, {checkpoints,[{43,0},{44,0},{45,0},{46,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14766.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 46 [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_46 - 46, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_46 - VBucket <46> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_47_'ns_1@10.1.2.31' - 47, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14770.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"/"},{checkpoints,[{47,0}]},{name,"rebalance_47"},{takeover,true}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14770.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 47 [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14770.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14768.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_47_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14768.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14767.0>,shutdown} [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',47}] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14783.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', {new_child_id, "+,-.", 'ns_1@10.1.2.31'}) [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14766.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14783.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,-./") [error_logger:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.14789.0>}, {name,{new_child_id,"+,-./",'ns_1@10.1.2.31'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.31",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"+,-./"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14789.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+,-./"}, {checkpoints,[{43,0},{44,0},{45,0},{46,0},{47,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14789.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 47 [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_47 - 47, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_47 - VBucket <47> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_48_'ns_1@10.1.2.31' - 48, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14793.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"0"},{checkpoints,[{48,0}]},{name,"rebalance_48"},{takeover,true}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14793.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 48 [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14793.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14791.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_48_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14791.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14790.0>,shutdown} [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',48}] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14806.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', {new_child_id, "+,-./", 'ns_1@10.1.2.31'}) [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14789.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14806.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,-./0") [error_logger:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.14812.0>}, {name,{new_child_id,"+,-./0",'ns_1@10.1.2.31'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.31",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"+,-./0"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14812.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+,-./0"}, {checkpoints,[{43,0},{44,0},{45,0},{46,0},{47,0},{48,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14812.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 48 [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_48 - 48, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_48 - VBucket <48> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_49_'ns_1@10.1.2.31' - 49, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14816.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"1"},{checkpoints,[{49,0}]},{name,"rebalance_49"},{takeover,true}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14816.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 49 [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14816.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14814.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_49_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14814.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14813.0>,shutdown} [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',49}] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14829.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', {new_child_id, "+,-./0", 'ns_1@10.1.2.31'}) [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14812.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14829.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,-./01") [error_logger:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.14835.0>}, {name,{new_child_id,"+,-./01",'ns_1@10.1.2.31'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.31",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"+,-./01"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14835.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+,-./01"}, {checkpoints,[{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14835.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 49 [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_49 - 49, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_49 - VBucket <49> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_50_'ns_1@10.1.2.31' - 50, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14839.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"2"},{checkpoints,[{50,0}]},{name,"rebalance_50"},{takeover,true}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14839.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 50 [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14839.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14837.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_50_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14837.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.14836.0>,shutdown} [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',50}] [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14852.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', {new_child_id, "+,-./01", 'ns_1@10.1.2.31'}) [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14835.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14852.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,-./012") [error_logger:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.14858.0>}, {name,{new_child_id,"+,-./012",'ns_1@10.1.2.31'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.31",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"+,-./012"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14858.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"+,-./012"}, {checkpoints,[{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:22:59] [ns_1@10.1.2.30:<0.14858.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 50 [ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_50 - 50, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_50 - VBucket <50> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_51_'ns_1@10.1.2.31' - 51, fill is completed with VBuckets

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14862.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"3"},{checkpoints,[{51,0}]},{name,"rebalance_51"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14862.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 51

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14862.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14860.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_51_'ns_1@10.1.2.31'">>]

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14860.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.14859.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.30',51}]

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14875.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30',
           {new_child_id, "+,-./012", 'ns_1@10.1.2.31'})

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14858.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14875.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.30', "+,-./0123")

[error_logger:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,'ns_vbm_new_sup-default'}
    started: [{pid,<0.14881.0>},
              {name,{new_child_id,"+,-./0123",'ns_1@10.1.2.31'}},
              {mfargs,
               {ebucketmigrator_srv,start_link,
                [{"10.1.2.31",11209},
                 {"10.1.2.30",11209},
                 [{username,"default"},
                  {password,[]},
                  {vbuckets,"+,-./0123"},
                  {takeover,false},
                  {suffix,"ns_1@10.1.2.30"}]]}},
              {restart_type,permanent},
              {shutdown,60000},
              {child_type,worker}]

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14881.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"+,-./0123"},
                      {checkpoints,[{43,0},{44,0},{45,0},{46,0},{47,0},{48,0},{49,0},{50,0},{51,0}]},
                      {name,"replication_ns_1@10.1.2.30"},
                      {takeover,false}]

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14881.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 51

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_51 - 51, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_51 - VBucket <51> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_52_'ns_1@10.1.2.31' - 52, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_52_'ns_1@10.1.2.32' - 52, fill is completed with VBuckets
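Note the pattern repeated in each of these cycles: a replica is never added to a running ebucketmigrator. cb_gen_vbm_sup kills the supervisor child whose id carries the old vbucket string ("+,-./012") and starts a replacement whose id carries the extended string ("+,-./0123"), which is why every single add_replica is followed by a fresh PROGRESS REPORT and a full "Starting tap stream" that re-lists all checkpoints. A minimal sketch of that replace-the-child idiom (illustrative only; the real set_node_replicas also diffs the wanted vbucket sets against the running children):

    %% Sketch, assuming Sup is a supervisor whose child ids encode the
    %% vbucket set: replace the old child so the id reflects the new set.
    replace_replicator(Sup, OldChildId, NewChildSpec) ->
        ok = supervisor:terminate_child(Sup, OldChildId),
        ok = supervisor:delete_child(Sup, OldChildId),
        supervisor:start_child(Sup, NewChildSpec).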
[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14885.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"4"},{checkpoints,[{52,0}]},{name,"rebalance_52"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14885.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 52

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14885.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14883.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_52_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_52_'ns_1@10.1.2.32'">>]

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14883.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.14882.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',52}]

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14900.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "4")

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,
77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,
99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,
116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,
133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,
218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,
235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254,255]},
 {passive,"+,-./01"},
 {ignore,[]},
 {replica,[]}]

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]
Passive: "+,-./01"
Cleanup: []
Replica: []
ReplicaCleanup: []

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_52 - 52, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_52 - VBucket <52> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_53_'ns_1@10.1.2.31' - 53, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_53_'ns_1@10.1.2.32' - 53, fill is completed with VBuckets

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14906.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"5"},{checkpoints,[{53,0}]},{name,"rebalance_53"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14906.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 53

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14906.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14905.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_53_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_53_'ns_1@10.1.2.32'">>]

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14905.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.14904.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',53}]

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14922.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32',
           {new_child_id, "4", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14922.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "45")

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,
77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,
99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,
116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,
133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,
218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,
235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254,255]},
 {passive,"+-./01"},
 {ignore,[]},
 {replica,","}]

[views:info] [2012-04-10 18:23:00]
[ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]
Passive: "+-./01"
Cleanup: ","
Replica: ","
ReplicaCleanup: []

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,
77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,
99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,
116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,
133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,
218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,
235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254,255]},
 {passive,"-./01"},
 {ignore,[]},
 {replica,"+,"}]

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]
Passive: "-./01"
Cleanup: "+"
Replica: "+,"
ReplicaCleanup: []

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_53 - 53, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_53 - VBucket <53> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_54_'ns_1@10.1.2.31' - 54, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_54_'ns_1@10.1.2.32' - 54, fill is completed with VBuckets

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,
77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,
99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,
116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,
133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,
218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,
235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254,255]},
 {passive,"-/01"},
 {ignore,[]},
 {replica,"+,."}]

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]
Passive: "-/01"
Cleanup: "."
Replica: "+,."
ReplicaCleanup: []

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,
77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,
99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,
116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,
133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,
218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,
235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254,255]},
 {passive,"/01"},
 {ignore,[]},
 {replica,"+,-."}]

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]
Passive: "/01"
Cleanup: "-"
Replica: "+,-."
ReplicaCleanup: []

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14943.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"6"},{checkpoints,[{54,0}]},{name,"rebalance_54"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14943.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 54

[rebalance:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14943.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,
77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,
99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,
116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,
133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,
218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,
235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254,255]},
 {passive,"01"},
 {ignore,[]},
 {replica,"+,-./"}]

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]
Passive: "01"
Cleanup: "/"
Replica: "+,-./"
ReplicaCleanup: []

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14927.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_54_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_54_'ns_1@10.1.2.32'">>]

[ns_server:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:<0.14927.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.14926.0>,shutdown}

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,
77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,
99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,
116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,
133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,
218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,
235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254,255]},
 {passive,"1"},
 {ignore,[]},
 {replica,"+,-./0"}]

[views:info] [2012-04-10 18:23:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,50,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]
Passive: "1"
Cleanup: "0"
Replica: "+,-./0"
ReplicaCleanup: []

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,51,52,53,54,55,
56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,
78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,
100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,
117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,
134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,
151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,
168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,
185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,
202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,
219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,
236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,
253,254,255]},
 {passive,"1"},
 {ignore,[]},
 {replica,"+,-./0"}]

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,51,52,53,54,55,56,
57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,
102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,
119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,
136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,
153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,
170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,
187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,
204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,
255]
Passive: "1"
Cleanup: "2"
Replica: "+,-./0"
ReplicaCleanup: []

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,51,52,54,55,56,
57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,
101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,
118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,
152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,
186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,
203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,
220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,
237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,
254,255]},
 {passive,"1"},
 {ignore,[]},
 {replica,"+,-./0"}]

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,51,52,54,55,56,57,
58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,
103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,
120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,
137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,
154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,
171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,
188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,
205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,
222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: "1"
Cleanup: "5"
Replica: "+,-./0"
ReplicaCleanup: []

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_54 - 54, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_54 - VBucket <54> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
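The capi_set_view_manager entries interleaved above mirror each vbucket map change into the view engine. In every Applying map/Classified pair, vbuckets whose replicas are still being built sit in the passive set, and the Cleanup set is whatever left the main (active plus passive) index since the previous map: either a vbucket this node handed over entirely (Cleanup "2", i.e. vbucket 50, after its takeover) or one demoted into the replica index, in which case it appears in both Cleanup and Replica at once (Cleanup "," and Replica "," for vbucket 44 earlier). One way to derive the cleanup set that is consistent with these entries (a sketch, not the module's actual code):

    %% Sketch: the main-index cleanup set is everything that was in
    %% active ++ passive before this map change but is in neither now.
    %% Vbuckets that moved to the replica index therefore appear in both
    %% the Cleanup and Replica sets, as in the log entries above.
    main_index_cleanup(OldActive, OldPassive, NewActive, NewPassive) ->
        Old = ordsets:union(ordsets:from_list(OldActive),
                            ordsets:from_list(OldPassive)),
        New = ordsets:union(ordsets:from_list(NewActive),
                            ordsets:from_list(NewPassive)),
        ordsets:subtract(Old, New).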
[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,51,54,55,56,57,
58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,
102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,
119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,
136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,
153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,
170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,
187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,
204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,
255]},
 {passive,"1"},
 {ignore,[]},
 {replica,"+,-./0"}]

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,51,54,55,56,57,58,
59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,
82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,
103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,
120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,
137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,
154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,
171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,
188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,
205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,
222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: "1"
Cleanup: "4"
Replica: "+,-./0"
ReplicaCleanup: []

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,54,55,56,57,58,
59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,
102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,
119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,
136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,
153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,
170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,
187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,
204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,
255]},
 {passive,"1"},
 {ignore,[]},
 {replica,"+,-./0"}]

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active:
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,54,55,56,57,58,59,
60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,
83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,
104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,
138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,
155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,
172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,
189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: "1"
Cleanup: "3"
Replica: "+,-./0"
ReplicaCleanup: []

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default:
[{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,54,55,56,57,58,
59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,
102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,
119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,
136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,
153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,
170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,
187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,
204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,
255]},
 {passive,[]},
 {ignore,[]},
 {replica,"+,-./01"}]

[views:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,54,55,56,57,58,59,
60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,
83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,
104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,
138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,
155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,
172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,
189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "1"
Replica: "+,-./01"
ReplicaCleanup: []

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',54}]

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15049.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32',
           {new_child_id,
"45", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15049.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "456") [rebalance:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15057.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"7"},{checkpoints,[{55,0}]},{name,"rebalance_55"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15057.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 55 [rebalance:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15057.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15056.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_55_'ns_1@10.1.2.31'">>, <<"replication_building_55_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15056.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15055.0>,shutdown} [ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',55}] [ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15072.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', {new_child_id, "456", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15072.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "4567") [ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_55_'ns_1@10.1.2.31' - 55, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_55_'ns_1@10.1.2.32' - 55, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_55 - 55, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_55 - VBucket <55> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
[rebalance:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15078.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"8"},{checkpoints,[{56,0}]},{name,"rebalance_56"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15078.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 56

[rebalance:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15078.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15077.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_56_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_56_'ns_1@10.1.2.32'">>]

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15077.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15076.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',56}]

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15093.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32',
           {new_child_id, "4567", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:<0.15093.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "45678")

[ns_server:info] [2012-04-10 18:23:01] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_56_'ns_1@10.1.2.31' - 56, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_56_'ns_1@10.1.2.32' - 56, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_56 - 56, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_56 - VBucket <56> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15099.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"9"},{checkpoints,[{57,0}]},{name,"rebalance_57"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15099.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 57

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15099.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15098.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_57_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_57_'ns_1@10.1.2.32'">>]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15098.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15097.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',57}]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15114.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32',
           {new_child_id, "45678", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15114.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "456789")

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_57_'ns_1@10.1.2.31' - 57, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_57_'ns_1@10.1.2.32' - 57, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_57 - 57, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_57 - VBucket <57> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15120.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,":"},{checkpoints,[{58,0}]},{name,"rebalance_58"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15120.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 58

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15120.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15119.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_58_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_58_'ns_1@10.1.2.32'">>]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15119.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15118.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',58}]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15135.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32',
           {new_child_id, "456789", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15135.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "456789:")

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_58_'ns_1@10.1.2.31' - 58, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_58_'ns_1@10.1.2.32' - 58, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_58 - 58, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_58 - VBucket <58> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15141.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,";"},{checkpoints,[{59,0}]},{name,"rebalance_59"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15141.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 59

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15141.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15140.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_59_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_59_'ns_1@10.1.2.32'">>]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15140.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15139.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.32',59}]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15156.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32',
           {new_child_id, "456789:", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15156.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.32', "456789:;")

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_59_'ns_1@10.1.2.32' - 59, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_59_'ns_1@10.1.2.31' - 59, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_59 - 59, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_59 - VBucket <59> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209]
Skipping janitor in state rebalancing:
{rebalancing_state,<0.13296.0>,
 {dict,6,16,16,8,80,48,
  {[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},
  {{[['ns_1@10.1.2.30'|0.34448160535117056]],
    [['ns_1@10.1.2.31'|0.39534883720930236]],
    [['ns_1@10.1.2.32'|0.0]],
    [['ns_1@10.1.2.33'|0.0]],
    [['ns_1@10.1.2.34'|0.0]],
    [['ns_1@10.1.2.35'|0.0]],
    [],[],[],[],[],[],[],[],[],[]}}}}

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15163.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"<"},{checkpoints,[{60,0}]},{name,"rebalance_60"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15163.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 60

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15163.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15161.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_60_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_60_'ns_1@10.1.2.33'">>]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15161.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15160.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',60}]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15186.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<")

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_60_'ns_1@10.1.2.31' - 60, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_60_'ns_1@10.1.2.33' - 60, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_60 - 60, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_60 - VBucket <60> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
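The janitor-skip entry above is informative in its own right: it dumps the orchestrator's rebalancing_state, whose second field is an OTP dict mapping each node to a per-node progress value: about 0.344 for ns_1@10.1.2.30 and 0.395 for ns_1@10.1.2.31 at this point, and 0.0 for the nodes whose moves have not started. Assuming those values are fractions of each node's planned movements (the single progress figure shown in the UI would then be some aggregate of them), a minimal way to fold the dict into one number (hypothetical helper, not ns_orchestrator code):

    %% Sketch: average the per-node progress fractions stored in the
    %% rebalancing_state dict into a single overall fraction.
    overall_progress(ProgressDict) ->
        {Sum, N} = dict:fold(fun(_Node, P, {S, C}) -> {S + P, C + 1} end,
                             {0.0, 0}, ProgressDict),
        case N of 0 -> 0.0; _ -> Sum / N end.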
[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15191.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"="},{checkpoints,[{61,0}]},{name,"rebalance_61"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15191.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 61

[rebalance:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15191.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15190.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_61_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_61_'ns_1@10.1.2.33'">>]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15190.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15189.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',61}]

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15214.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33',
           {new_child_id, "<", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:<0.15214.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=")

[ns_server:info] [2012-04-10 18:23:02] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_61_'ns_1@10.1.2.31' - 61, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_61_'ns_1@10.1.2.33' - 61, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_61 - 61, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_61 - VBucket <61> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15221.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,">"},{checkpoints,[{62,0}]},{name,"rebalance_62"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15221.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 62

[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15221.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15218.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_62_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_62_'ns_1@10.1.2.33'">>]

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15218.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15217.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',62}]

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15237.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33',
           {new_child_id, "<=", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15237.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=>")

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_62_'ns_1@10.1.2.31' - 62, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_62_'ns_1@10.1.2.33' - 62, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_62 - 62, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_62 - VBucket <62> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15242.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"?"},{checkpoints,[{63,0}]},{name,"rebalance_63"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15242.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 63

[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15242.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15241.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_63_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_63_'ns_1@10.1.2.33'">>]

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15241.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15240.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',63}]

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15258.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33',
           {new_child_id, "<=>", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15258.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=>?")

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_63_'ns_1@10.1.2.31' - 63, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_63_'ns_1@10.1.2.33' - 63, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_63 - 63, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_63 - VBucket <63> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15263.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"@"},{checkpoints,[{64,0}]},{name,"rebalance_64"},{takeover,true}]

[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15263.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 64

[rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15263.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15262.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_64_'ns_1@10.1.2.31'">>,
                                                    <<"replication_building_64_'ns_1@10.1.2.33'">>]

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15262.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15261.0>,shutdown}

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',64}]

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15279.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33',
           {new_child_id, "<=>?", 'ns_1@10.1.2.31'})

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15279.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=>?@")

[ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_64_'ns_1@10.1.2.31' - 64, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_64_'ns_1@10.1.2.33' - 64, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_64 - 64, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_64 - VBucket <64> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15284.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"A"},{checkpoints,[{65,0}]},{name,"rebalance_65"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15284.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 65 [rebalance:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15284.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15283.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_65_'ns_1@10.1.2.31'">>, <<"replication_building_65_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15283.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15282.0>,shutdown} [ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',65}] [ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15300.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', {new_child_id, "<=>?@", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:<0.15300.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=>?@A") [ns_server:info] [2012-04-10 18:23:03] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_65_'ns_1@10.1.2.31' - 65, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_65_'ns_1@10.1.2.33' - 65, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_65 - 65, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_65 - VBucket <65> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15305.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"B"},{checkpoints,[{66,0}]},{name,"rebalance_66"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15305.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 66 [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15305.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15304.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_66_'ns_1@10.1.2.31'">>, <<"replication_building_66_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15304.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15303.0>,shutdown} [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',66}] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15322.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', {new_child_id, "<=>?@A", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15322.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=>?@AB") [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_66_'ns_1@10.1.2.31' - 66, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_66_'ns_1@10.1.2.33' - 66, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_66 - 66, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_66 - VBucket <66> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15327.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"C"},{checkpoints,[{67,0}]},{name,"rebalance_67"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15327.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 67 [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15327.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15326.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_67_'ns_1@10.1.2.31'">>, <<"replication_building_67_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15326.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15325.0>,shutdown} [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',67}] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15343.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', {new_child_id, "<=>?@AB", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15343.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=>?@ABC") [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_67_'ns_1@10.1.2.33' - 67, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_67_'ns_1@10.1.2.31' - 67, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_67 - 67, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_67 - VBucket <67> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15348.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"D"},{checkpoints,[{68,0}]},{name,"rebalance_68"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15348.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 68 [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15348.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15347.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_68_'ns_1@10.1.2.31'">>, <<"replication_building_68_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15347.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15346.0>,shutdown} [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.33',68}] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15364.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', {new_child_id, "<=>?@ABC", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15364.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.33', "<=>?@ABCD") [views:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,56,57,58,59,60, 61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82, 83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103, 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137, 138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154, 155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171, 172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"67"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,56,57,58,59,60,61, 62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84, 85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105, 106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: "+,-./0123" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_68_'ns_1@10.1.2.31' - 68, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_68_'ns_1@10.1.2.33' - 68, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_68 - 68, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_68 - VBucket <68> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15372.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"E"},{checkpoints,[{69,0}]},{name,"rebalance_69"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15372.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 69 [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15372.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15368.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_69_'ns_1@10.1.2.31'">>, <<"replication_building_69_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15368.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15367.0>,shutdown} [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',69}] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15389.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "E") [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_69_'ns_1@10.1.2.31' - 69, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_69_'ns_1@10.1.2.34' - 69, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_69 - 69, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_69 - VBucket <69> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
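From this point on, capi_set_view_manager entries are interleaved with the moves: each one applies an updated vbucket map to the set view (index) of bucket "default" as actives land on or leave this node. Active and Passive are printed as explicit id lists, while Ignore, Replica, Cleanup and ReplicaCleanup use the same string-as-integer-list encoding noted earlier. For example, the Replica value "+,-./0123" that recurs in every dump decodes to vbuckets 43 through 51, exactly the range absent from the Active list:

    %% Replica string copied verbatim from the views:info dumps above
    1> io:format("~w~n", ["+,-./0123"]).
    [43,44,45,46,47,48,49,50,51]
    ok

Read the same way, {ignore,"67"} covers vbuckets [54,55], and a Cleanup value such as "<" marks vbucket 60 for index cleanup.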
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15400.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"F"},{checkpoints,[{70,0}]},{name,"rebalance_70"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15400.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 70 [rebalance:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15400.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15399.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_70_'ns_1@10.1.2.31'">>, <<"replication_building_70_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15399.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15398.0>,shutdown} [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',70}] [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15417.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', {new_child_id, "E", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:<0.15417.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "EF") [ns_server:info] [2012-04-10 18:23:04] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_70_'ns_1@10.1.2.31' - 70, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_70_'ns_1@10.1.2.34' - 70, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_70 - 70, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_70 - VBucket <70> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15421.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"G"},{checkpoints,[{71,0}]},{name,"rebalance_71"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15421.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 71 [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15421.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15420.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_71_'ns_1@10.1.2.31'">>, <<"replication_building_71_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15420.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15419.0>,shutdown} [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',71}] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15438.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', {new_child_id, "EF", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15438.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "EFG") [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_71_'ns_1@10.1.2.31' - 71, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_71_'ns_1@10.1.2.34' - 71, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_71 - 71, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_71 - VBucket <71> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15442.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"H"},{checkpoints,[{72,0}]},{name,"rebalance_72"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15442.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 72 [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15442.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15441.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_72_'ns_1@10.1.2.31'">>, <<"replication_building_72_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15441.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15440.0>,shutdown} [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',72}] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15459.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', {new_child_id, "EFG", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15459.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "EFGH") [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_72_'ns_1@10.1.2.31' - 72, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_72_'ns_1@10.1.2.34' - 72, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_72 - 72, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_72 - VBucket <72> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15463.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"I"},{checkpoints,[{73,0}]},{name,"rebalance_73"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15463.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 73 [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15463.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15462.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_73_'ns_1@10.1.2.31'">>, <<"replication_building_73_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15462.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15461.0>,shutdown} [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',73}] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15480.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', {new_child_id, "EFGH", 'ns_1@10.1.2.31'}) [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15480.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "EFGHI") [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_73_'ns_1@10.1.2.31' - 73, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_73_'ns_1@10.1.2.34' - 73, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_73 - 73, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_73 - VBucket <73> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
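Note how the replication child for each destination is managed across these moves: every add_replica change kills the existing ns_vbm_new_sup child, whose new_child_id embeds the current char-encoded vbucket list, and immediately starts a replacement whose list is one vbucket longer. This suggests the replicator is restarted with the full set on each change rather than updated in place. The id growth is plain Erlang list concatenation:

    %% "EFGH" is the killed child's list; appending "I" (vbucket 73) gives the new one
    1> io:format("~w~n", ["EFGH" ++ "I"]).
    [69,70,71,72,73]
    ok

which matches the start_child(..., "EFGHI") call above after the add_replica of vbucket 73.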
Disconnecting tap stream [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"6789:;=>?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "<" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"689:;=>?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 
75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "7" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"69:;=>?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "8" Replica: "+,-./0123" ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15485.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"J"},{checkpoints,[{74,0}]},{name,"rebalance_74"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15485.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 74 [rebalance:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15485.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) 
eq_tapq:replication_building_74_'ns_1@10.1.2.34' - 74, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_74_'ns_1@10.1.2.31' - 74, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_74 - 74, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_74 - VBucket <74> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"6:;=>?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "9" Replica: "+,-./0123" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15483.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_74_'ns_1@10.1.2.31'">>, <<"replication_building_74_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:<0.15483.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.15482.0>,shutdown} [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 
114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"6;=>?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:05] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: ":" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"6=>?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 
183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: ";" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"=>?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "6" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, 
{ignore,"=?@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: ">" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"=@ABCD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "?" 
Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"=@ACD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "B" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73, 74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"=@AD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,70,71,72,73,74, 
75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "C" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,71,72,73,74, 75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96, 97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"=@AD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,71,72,73,74,75, 76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115, 116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132, 133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149, 150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166, 167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183, 184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255] Passive: [] Cleanup: "F" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,72,73,74,75, 76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 
166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255]}, {passive,[]}, {ignore,"=@AD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,69,72,73,74,75,76, 77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99, 100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116, 117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133, 134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150, 151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167, 168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] Passive: [] Cleanup: "G" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,72,73,74,75,76, 77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115, 116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132, 133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149, 150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166, 167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183, 184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255]}, {passive,[]}, {ignore,"=@AD"}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,72,73,74,75,76,77, 78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99, 100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116, 117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133, 134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150, 151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167, 168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] 
Passive: [] Cleanup: "E" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,72..255]},{passive,[]},{ignore,"@AD"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,72..255] Passive: [] Cleanup: "=" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,72..255]},{passive,[]},{ignore,"AD"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,72..255] Passive: [] Cleanup: "@" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,72..255]},{passive,[]},{ignore,"D"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,72..255] Passive: [] Cleanup: "A" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,72..255]},{passive,[]},{ignore,[]},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:06] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,72..255] Passive: [] Cleanup: "D" Replica: "+,-./0123" ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',74}]
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15688.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', {new_child_id, "EFGHI", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15688.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "EFGHIJ")
[views:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,72,74..255]},{passive,[]},{ignore,[]},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,72,74..255] Passive: [] Cleanup: "I" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,74..255]},{passive,[]},{ignore,[]},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,74..255] Passive: [] Cleanup: "H" Replica: "+,-./0123" ReplicaCleanup: []
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15712.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"K"},{checkpoints,[{75,0}]},{name,"rebalance_75"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15712.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 75
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15712.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15691.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_75_'ns_1@10.1.2.31'">>, <<"replication_building_75_'ns_1@10.1.2.34'">>]
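A note on reading these entries: the short quoted strings in the map and tap-stream terms are not labels but the Erlang runtime's printable rendering of lists of vbucket ids. A list of integers whose elements are all printable character codes is displayed as a double-quoted string, so {vbuckets,"K"} is really {vbuckets,[75]}, Cleanup: "H" means vbucket 72, the replica set "+,-./0123" is vbuckets 43-51, and a child id such as "EFGHIJ" is vbuckets 69-74. (For the same reason, the 200-odd-element active lists are shown condensed to ranges such as 0..42,72..255 in this excerpt.) This is easy to confirm in any Erlang shell by printing with ~w, which bypasses the string heuristic:

    1> io:format("~w~n", ["K"]).
    [75]
    ok
    2> io:format("~w~n", ["EFGHIJ"]).
    [69,70,71,72,73,74]
    ok
    3> io:format("~w~n", ["+,-./0123"]).
    [43,44,45,46,47,48,49,50,51]
    ok
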
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15691.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15690.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',75}]
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15730.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', {new_child_id, "EFGHIJ", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15730.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "EFGHIJK")
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_75_'ns_1@10.1.2.34' - 75, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_75_'ns_1@10.1.2.31' - 75, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_75 - 75, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_75 - VBucket <75> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15734.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"L"},{checkpoints,[{76,0}]},{name,"rebalance_76"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15734.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 76
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15734.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15733.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_76_'ns_1@10.1.2.31'">>, <<"replication_building_76_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15733.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15732.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.34',76}]
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15751.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', {new_child_id, "EFGHIJK", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15751.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.34', "EFGHIJKL")
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_76_'ns_1@10.1.2.31' - 76, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_76_'ns_1@10.1.2.34' - 76, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_76 - 76, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_76 - VBucket <76> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15755.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"M"},{checkpoints,[{77,0}]},{name,"rebalance_77"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15755.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 77
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15755.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15754.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_77_'ns_1@10.1.2.31'">>, <<"replication_building_77_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15754.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15753.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',77}]
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15773.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "M")
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_77_'ns_1@10.1.2.31' - 77, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_77_'ns_1@10.1.2.35' - 77, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_77 - 77, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_77 - VBucket <77> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15783.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"N"},{checkpoints,[{78,0}]},{name,"rebalance_78"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15783.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 78
[rebalance:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15783.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15775.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_78_'ns_1@10.1.2.31'">>, <<"replication_building_78_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.15775.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15774.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:07] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',78}]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15803.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "M", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15803.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MN")
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_78_'ns_1@10.1.2.31' - 78, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_78_'ns_1@10.1.2.35' - 78, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_78 - 78, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_78 - VBucket <78> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15806.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"O"},{checkpoints,[{79,0}]},{name,"rebalance_79"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15806.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 79
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15806.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15805.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_79_'ns_1@10.1.2.31'">>, <<"replication_building_79_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15805.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15804.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',79}]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15824.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "MN", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15824.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MNO")
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_79_'ns_1@10.1.2.31' - 79, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_79_'ns_1@10.1.2.35' - 79, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_79 - 79, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_79 - VBucket <79> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15827.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"P"},{checkpoints,[{80,0}]},{name,"rebalance_80"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15827.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 80
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15827.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15826.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_80_'ns_1@10.1.2.31'">>, <<"replication_building_80_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15826.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15825.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',80}]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15845.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "MNO", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15845.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MNOP")
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_80_'ns_1@10.1.2.31' - 80, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_80_'ns_1@10.1.2.35' - 80, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_80 - 80, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_80 - VBucket <80> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15848.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"Q"},{checkpoints,[{81,0}]},{name,"rebalance_81"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15848.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 81
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15848.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15847.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_81_'ns_1@10.1.2.31'">>, <<"replication_building_81_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15847.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15846.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',81}]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15867.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "MNOP", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15867.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MNOPQ")
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_81_'ns_1@10.1.2.31' - 81, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_81_'ns_1@10.1.2.35' - 81, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_81 - 81, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_81 - VBucket <81> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15877.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"R"},{checkpoints,[{82,0}]},{name,"rebalance_82"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15877.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 82
[rebalance:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15877.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15869.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_82_'ns_1@10.1.2.31'">>, <<"replication_building_82_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15869.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15868.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',82}]
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15895.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "MNOPQ", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:<0.15895.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MNOPQR")
[ns_server:info] [2012-04-10 18:23:08] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_82_'ns_1@10.1.2.31' - 82, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_82_'ns_1@10.1.2.35' - 82, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_82 - 82, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_82 - VBucket <82> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15898.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"S"},{checkpoints,[{83,0}]},{name,"rebalance_83"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15898.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 83
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15898.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15897.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_83_'ns_1@10.1.2.31'">>, <<"replication_building_83_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15897.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15896.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',83}]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15916.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "MNOPQR", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15916.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MNOPQRS")
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_83_'ns_1@10.1.2.31' - 83, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_83_'ns_1@10.1.2.35' - 83, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_83 - 83, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_83 - VBucket <83> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15919.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"T"},{checkpoints,[{84,0}]},{name,"rebalance_84"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15919.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 84
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15919.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15918.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_84_'ns_1@10.1.2.31'">>, <<"replication_building_84_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15918.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15917.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',84}]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15937.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "MNOPQRS", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15937.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MNOPQRST")
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_84_'ns_1@10.1.2.31' - 84, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_84_'ns_1@10.1.2.35' - 84, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_84 - 84, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_84 - VBucket <84> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15940.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"U"},{checkpoints,[{85,0}]},{name,"rebalance_85"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15940.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 85
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15940.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15939.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_85_'ns_1@10.1.2.31'">>, <<"replication_building_85_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15939.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15938.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.31','ns_1@10.1.2.35',85}]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15958.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', {new_child_id, "MNOPQRST", 'ns_1@10.1.2.31'})
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15958.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.31', 'ns_1@10.1.2.35', "MNOPQRSTU")
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_85_'ns_1@10.1.2.31' - 85, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_85_'ns_1@10.1.2.35' - 85, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_85 - 85, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_85 - VBucket <85> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15961.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"V"},{checkpoints,[{86,0}]},{name,"rebalance_86"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15961.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 86
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15961.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15960.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_86_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15960.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15959.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',86}]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15974.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "V")
[error_logger:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_vbm_new_sup-default'}
             started: [{pid,<0.15980.0>},
                       {name,{new_child_id,"V",'ns_1@10.1.2.32'}},
                       {mfargs,{ebucketmigrator_srv,start_link,
                                [{"10.1.2.32",11209},
                                 {"10.1.2.30",11209},
                                 [{username,"default"},
                                  {password,[]},
                                  {vbuckets,"V"},
                                  {takeover,false},
                                  {suffix,"ns_1@10.1.2.30"}]]}},
                       {restart_type,permanent},
                       {shutdown,60000},
                       {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15980.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"V"},{checkpoints,[{86,0}]},{name,"replication_ns_1@10.1.2.30"},{takeover,false}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15980.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 86
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_86_'ns_1@10.1.2.32' - 86, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_86 - 86, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_86 - VBucket <86> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_87_'ns_1@10.1.2.32' - 87, fill is completed with VBuckets
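The kill_child/start_child pairs above show how ns_server grows a replication stream during rebalance: rather than adding a vbucket to a running ebucketmigrator child, it stops the child and restarts it with the destination's full vbucket list plus the newly built replica, which is why the child ids step through "M", "MN", ... up to "MNOPQRSTU" (vbuckets 77-85). A minimal sketch of that id arithmetic (illustrative only, not ns_server source; the module and function names are invented):

    -module(vbm_child_id).
    -export([add/2]).

    %% The child id is just the ordered list of vbucket ids replicated
    %% to a given destination node, so adding one replica extends it by
    %% one id: vbm_child_id:add("MNOPQRST", 85) =:= "MNOPQRSTU".
    add(VBuckets, VBucket) ->
        lists:usort([VBucket | VBuckets]).
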
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15984.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"W"},{checkpoints,[{87,0}]},{name,"rebalance_87"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15984.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 87
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15984.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15982.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_87_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15982.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.15981.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',87}]
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15997.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "V", 'ns_1@10.1.2.32'})
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15980.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!
[ns_server:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.15997.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VW")
[error_logger:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_vbm_new_sup-default'}
             started: [{pid,<0.16003.0>},
                       {name,{new_child_id,"VW",'ns_1@10.1.2.32'}},
                       {mfargs,{ebucketmigrator_srv,start_link,
                                [{"10.1.2.32",11209},
                                 {"10.1.2.30",11209},
                                 [{username,"default"},
                                  {password,[]},
                                  {vbuckets,"VW"},
                                  {takeover,false},
                                  {suffix,"ns_1@10.1.2.30"}]]}},
                       {restart_type,permanent},
                       {shutdown,60000},
                       {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.16003.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"VW"},{checkpoints,[{86,0},{87,0}]},{name,"replication_ns_1@10.1.2.30"},{takeover,false}]
[rebalance:info] [2012-04-10 18:23:09] [ns_1@10.1.2.30:<0.16003.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 87
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_87 - 87, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_87 - VBucket <87> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_88_'ns_1@10.1.2.32' - 88, fill is completed with VBuckets
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16008.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"X"},{checkpoints,[{88,0}]},{name,"rebalance_88"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16008.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 88
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16008.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16005.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_88_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16005.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.16004.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',88}]
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16021.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "VW", 'ns_1@10.1.2.32'})
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16003.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16021.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VWX")
[error_logger:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_vbm_new_sup-default'}
             started: [{pid,<0.16027.0>},
                       {name,{new_child_id,"VWX",'ns_1@10.1.2.32'}},
                       {mfargs,{ebucketmigrator_srv,start_link,
                                [{"10.1.2.32",11209},
                                 {"10.1.2.30",11209},
                                 [{username,"default"},
                                  {password,[]},
                                  {vbuckets,"VWX"},
                                  {takeover,false},
                                  {suffix,"ns_1@10.1.2.30"}]]}},
                       {restart_type,permanent},
                       {shutdown,60000},
                       {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16027.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"VWX"},{checkpoints,[{86,0},{87,0},{88,0}]},{name,"replication_ns_1@10.1.2.30"},{takeover,false}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16027.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 88
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_88 - 88, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_88 - VBucket <88> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_89_'ns_1@10.1.2.32' - 89, fill is completed with VBuckets
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16031.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"Y"},{checkpoints,[{89,0}]},{name,"rebalance_89"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16031.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 89
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16031.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16029.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_89_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16029.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.16028.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',89}]
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16044.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "VWX", 'ns_1@10.1.2.32'})
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16027.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16044.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VWXY")
[error_logger:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_vbm_new_sup-default'}
             started: [{pid,<0.16050.0>},
                       {name,{new_child_id,"VWXY",'ns_1@10.1.2.32'}},
                       {mfargs,{ebucketmigrator_srv,start_link,
                                [{"10.1.2.32",11209},
                                 {"10.1.2.30",11209},
                                 [{username,"default"},
                                  {password,[]},
                                  {vbuckets,"VWXY"},
                                  {takeover,false},
                                  {suffix,"ns_1@10.1.2.30"}]]}},
                       {restart_type,permanent},
                       {shutdown,60000},
                       {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16050.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"VWXY"},{checkpoints,[{86,0},{87,0},{88,0},{89,0}]},{name,"replication_ns_1@10.1.2.30"},{takeover,false}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16050.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 89
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"JKLMNOPQR"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: [] Replica: "+,-./0123" ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_89 - 89, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_89 - VBucket <89> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_90_'ns_1@10.1.2.32' - 90, fill is completed with VBuckets
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16054.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"Z"},{checkpoints,[{90,0}]},{name,"rebalance_90"},{takeover,true}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16054.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 90
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16054.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16052.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_90_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16052.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.16051.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',90}]
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16067.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "VWXY", 'ns_1@10.1.2.32'})
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16050.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16067.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VWXYZ")
[error_logger:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_vbm_new_sup-default'}
             started: [{pid,<0.16073.0>},
                       {name,{new_child_id,"VWXYZ",'ns_1@10.1.2.32'}},
                       {mfargs,{ebucketmigrator_srv,start_link,
                                [{"10.1.2.32",11209},
                                 {"10.1.2.30",11209},
                                 [{username,"default"},
                                  {password,[]},
                                  {vbuckets,"VWXYZ"},
                                  {takeover,false},
                                  {suffix,"ns_1@10.1.2.30"}]]}},
                       {restart_type,permanent},
                       {shutdown,60000},
                       {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16073.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"VWXYZ"},{checkpoints,[{86,0},{87,0},{88,0},{89,0},{90,0}]},{name,"replication_ns_1@10.1.2.30"},{takeover,false}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16073.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 90
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"KLMNOPQR"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "J" Replica: "+,-./0123" ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_90 - 90, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_90 - VBucket <90> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_91_'ns_1@10.1.2.32' - 91, fill is completed with VBuckets
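The set-view manager entries interleaved above show the index-side cleanup for the vbuckets just handed off: each successive "Applying map" drops exactly one id from the ignore list, and the matching "Classified vbuckets" entry reports that id under Cleanup. Since the ids are character-coded, the arithmetic is plain list deletion, which is easy to check in an Erlang shell ($J and $O are the character codes 74 and 79, i.e. vbuckets 74 and 79):

    1> lists:delete($J, "JKLMNOPQR").
    "KLMNOPQR"
    2> lists:delete($O, "KLMNOPQR").
    "KLMNPQR"
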
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16078.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"["},{checkpoints,[{91,0}]},{name,"rebalance_91"},{takeover,true}]
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"KLMNPQR"},{replica,"+,-./0123"}]
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16078.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 91
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "O" Replica: "+,-./0123" ReplicaCleanup: []
[rebalance:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16078.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16075.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_91_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:<0.16075.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.16074.0>,shutdown}
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"LMNPQR"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "K" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"MNPQR"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "L" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"MNQR"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "P" Replica: "+,-./0123" ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_91 - 91, fill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_91 - VBucket <91> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"MNR"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:10] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "Q" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"MR"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "N" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..255]},{passive,[]},{ignore,"R"},{replica,"+,-./0123"}]
[views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default: Active: [0..42,83..255] Passive: [] Cleanup: "M" Replica: "+,-./0123" ReplicaCleanup: []
[views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0..42,83..226,
227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,83,84,85,86,87,88, 89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108, 109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "R" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,84,85,86,87,88, 89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107, 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,84,85,86,87,88,89, 90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108, 109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "S" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,85,86,87,88,89, 
90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108, 109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,85,86,87,88,89,90, 91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109, 110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126, 127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "T" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,86,87,88,89,90, 91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109, 110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126, 127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,86,87,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 
213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "U" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,86,88,89,90,91, 92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,86,88,89,90,91,92, 93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: "W" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,86,88,89,91,92, 93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110, 111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,86,88,89,91,92,93, 94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: "Z" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,88,89,91,92,93, 94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,88,89,91,92,93,94, 95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: "V" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,89,91,92,93,94, 95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 
198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,89,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255] Passive: [] Cleanup: "X" Replica: "+,-./0123" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,91,92,93,94,95,96, 97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "Y" Replica: "+,-./0123" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',91}] [ns_server:info] [2012-04-10 18:23:11] 
[ns_1@10.1.2.30:<0.16258.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "VWXYZ", 'ns_1@10.1.2.32'}) [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16073.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16258.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VWXYZ[") [error_logger:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.16264.0>}, {name,{new_child_id,"VWXYZ[",'ns_1@10.1.2.32'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.32",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"VWXYZ["}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,92,93,94,95,96, 97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113, 114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,92,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "[" Replica: "+,-./0123" ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16264.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"VWXYZ["}, {checkpoints,[{86,0},{87,0},{88,0},{89,0},{90,0},{91,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16264.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 91 [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16279.0>:ebucketmigrator_srv:init:167] 
Starting tap stream: [{vbuckets,"\\"},{checkpoints,[{92,0}]},{name,"rebalance_92"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16279.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 92 [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16279.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16266.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_92_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16266.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16265.0>,shutdown} [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',92}] [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16292.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "VWXYZ[", 'ns_1@10.1.2.32'}) [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16264.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16292.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VWXYZ[\\") [error_logger:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.16298.0>}, {name,{new_child_id,"VWXYZ[\\",'ns_1@10.1.2.32'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.32",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"VWXYZ[\\"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,93,94,95,96,97, 98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114, 115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131, 132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123"}] [views:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,93,94,95,96,97,98, 99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115, 116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132, 133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149, 
150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166, 167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183, 184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255] Passive: [] Cleanup: "\\" Replica: "+,-./0123" ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16298.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"VWXYZ[\\"}, {checkpoints,[{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [ns_server:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_92_'ns_1@10.1.2.32' - 92, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_92 - 92, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_92 - VBucket <92> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:11] [ns_1@10.1.2.30:<0.16298.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 92 [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16312.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"]"},{checkpoints,[{93,0}]},{name,"rebalance_93"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16312.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 93 [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16312.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16300.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_93_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16300.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16299.0>,shutdown} [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',93}] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16325.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "VWXYZ[\\", 'ns_1@10.1.2.32'}) [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16298.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16325.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VWXYZ[\\]") [error_logger:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.16331.0>}, {name,{new_child_id,"VWXYZ[\\]",'ns_1@10.1.2.32'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.32",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"VWXYZ[\\]"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16331.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"VWXYZ[\\]"}, {checkpoints,[{86,0},{87,0},{88,0},{89,0},{90,0},{91,0},{92,0},{93,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16331.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 93 [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_93_'ns_1@10.1.2.32' - 93, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_93 - 93, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_93 - VBucket <93> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_94_'ns_1@10.1.2.32' - 94, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16335.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"^"},{checkpoints,[{94,0}]},{name,"rebalance_94"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16335.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 94 [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16335.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16333.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_94_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16333.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16332.0>,shutdown} [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.30',94}] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16348.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', {new_child_id, "VWXYZ[\\]", 'ns_1@10.1.2.32'}) [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16331.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16348.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.30', "VWXYZ[\\]^") [error_logger:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.16354.0>}, {name,{new_child_id,"VWXYZ[\\]^",'ns_1@10.1.2.32'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.32",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"VWXYZ[\\]^"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16354.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"VWXYZ[\\]^"}, {checkpoints,[{86,0}, {87,0}, {88,0}, {89,0}, {90,0}, {91,0}, {92,0}, {93,0}, {94,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16354.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 94 [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_94 - 94, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_94 - VBucket <94> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_95_'ns_1@10.1.2.31' - 95, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_95_'ns_1@10.1.2.32' - 95, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16358.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"_"},{checkpoints,[{95,0}]},{name,"rebalance_95"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16358.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 95 [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16358.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16356.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_95_'ns_1@10.1.2.32'">>, <<"replication_building_95_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16356.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16355.0>,shutdown} [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',95}] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16372.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_") [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state rebalancing: {rebalancing_state,<0.13296.0>, {dict,6,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[], [],[],[],[]}, {{[['ns_1@10.1.2.30'| 0.46488294314381273]], [['ns_1@10.1.2.31'| 0.9767441860465116]], [['ns_1@10.1.2.32'| 0.2325581395348837]], [['ns_1@10.1.2.33'|0.0]], [['ns_1@10.1.2.34'|0.0]], [['ns_1@10.1.2.35'|0.0]], [],[],[],[],[],[],[],[],[],[]}}}} 
[ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_95 - 95, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_95 - VBucket <95> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_96_'ns_1@10.1.2.32' - 96, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_96_'ns_1@10.1.2.31' - 96, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16388.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"`"},{checkpoints,[{96,0}]},{name,"rebalance_96"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16388.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 96 [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16388.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16378.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_96_'ns_1@10.1.2.32'">>, <<"replication_building_96_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16378.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16377.0>,shutdown} [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',96}] [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16402.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', {new_child_id, "_", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16402.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_`") [ns_server:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_96 - 96, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_96 - VBucket <96> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_97_'ns_1@10.1.2.32' - 97, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_97_'ns_1@10.1.2.31' - 97, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16417.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"a"},{checkpoints,[{97,0}]},{name,"rebalance_97"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16417.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 97 [rebalance:info] [2012-04-10 18:23:12] [ns_1@10.1.2.30:<0.16417.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16408.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_97_'ns_1@10.1.2.32'">>, <<"replication_building_97_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16408.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16407.0>,shutdown} [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',97}] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16434.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', {new_child_id, "_`", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16434.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_`a") [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_97 - 97, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_97 - VBucket <97> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_98_'ns_1@10.1.2.32' - 98, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_98_'ns_1@10.1.2.31' - 98, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16441.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"b"},{checkpoints,[{98,0}]},{name,"rebalance_98"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16441.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 98 [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16441.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16440.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_98_'ns_1@10.1.2.32'">>, <<"replication_building_98_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16440.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16439.0>,shutdown} [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',98}] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16455.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', {new_child_id, "_`a", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16455.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_`ab") [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_98 - 98, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_98 - VBucket <98> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_99_'ns_1@10.1.2.32' - 99, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_99_'ns_1@10.1.2.31' - 99, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16462.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"c"},{checkpoints,[{99,0}]},{name,"rebalance_99"},{takeover,true}] [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16462.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 99 [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16462.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16461.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_99_'ns_1@10.1.2.32'">>, <<"replication_building_99_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16461.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16460.0>,shutdown} [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',99}] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16476.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', {new_child_id, "_`ab", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16476.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_`abc") [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_99 - 99, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_99 - VBucket <99> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_100_'ns_1@10.1.2.32' - 100, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_100_'ns_1@10.1.2.31' - 100, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16483.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"d"}, {checkpoints,[{100,0}]}, {name,"rebalance_100"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16483.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 100 [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16483.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16482.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_100_'ns_1@10.1.2.32'">>, <<"replication_building_100_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16482.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16481.0>,shutdown} [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',100}] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16497.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', {new_child_id, "_`abc", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16497.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_`abcd") [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_100 - 100, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_100 - VBucket <100> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_101_'ns_1@10.1.2.32' - 101, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_101_'ns_1@10.1.2.31' - 101, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16504.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"e"}, {checkpoints,[{101,0}]}, {name,"rebalance_101"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16504.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 101 [rebalance:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16504.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16503.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_101_'ns_1@10.1.2.32'">>, <<"replication_building_101_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16503.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16502.0>,shutdown} [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',101}] [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16518.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', {new_child_id, "_`abcd", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:13] [ns_1@10.1.2.30:<0.16518.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_`abcde") [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_101 - 101, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_101 - VBucket <101> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_102_'ns_1@10.1.2.32' - 102, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_102_'ns_1@10.1.2.31' - 102, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16525.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"f"}, {checkpoints,[{102,0}]}, {name,"rebalance_102"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16525.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 102 [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16525.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16524.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_102_'ns_1@10.1.2.32'">>, <<"replication_building_102_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16524.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16523.0>,shutdown} [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.31',102}] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16539.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', {new_child_id, "_`abcde", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16539.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.31', "_`abcdef") [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_102 - 102, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_102 - VBucket <102> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_103_'ns_1@10.1.2.33' - 103, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_103_'ns_1@10.1.2.32' - 103, fill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16546.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"g"}, {checkpoints,[{103,0}]}, {name,"rebalance_103"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16546.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 103 [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16546.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16545.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_103_'ns_1@10.1.2.32'">>, <<"replication_building_103_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16545.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16544.0>,shutdown} [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',103}] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16562.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "g") [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_103 - 103, fill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_103 - VBucket <103> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_104_'ns_1@10.1.2.32' - 104, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_104_'ns_1@10.1.2.33' - 104, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16567.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"h"}, {checkpoints,[{104,0}]}, {name,"rebalance_104"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16567.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 104 [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16567.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16566.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_104_'ns_1@10.1.2.32'">>, <<"replication_building_104_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16566.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16565.0>,shutdown} [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',104}] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16583.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "g", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16583.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "gh") [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16589.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"i"}, {checkpoints,[{105,0}]}, {name,"rebalance_105"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16589.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 105 [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_104 - 104, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_104 - VBucket <104> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_105_'ns_1@10.1.2.32' - 105, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_105_'ns_1@10.1.2.33' - 105, backfill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16589.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16587.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_105_'ns_1@10.1.2.32'">>, <<"replication_building_105_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16587.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16586.0>,shutdown} [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',105}] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16612.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "gh", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16612.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "ghi") [views:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\"}] [views:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: "+,-./0123VWXYZ[\\" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_105 - 105, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_105 - VBucket <105> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_106_'ns_1@10.1.2.33' - 106, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_106_'ns_1@10.1.2.32' - 106, backfill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16644.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"j"}, {checkpoints,[{106,0}]}, {name,"rebalance_106"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16644.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 106 [rebalance:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16644.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16616.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_106_'ns_1@10.1.2.32'">>, <<"replication_building_106_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16616.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16615.0>,shutdown} [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',106}] [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16665.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "ghi", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:14] [ns_1@10.1.2.30:<0.16665.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "ghij") [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_106 - 106, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_106 - VBucket <106> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_107_'ns_1@10.1.2.32' - 107, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_107_'ns_1@10.1.2.33' - 107, backfill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16670.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"k"}, {checkpoints,[{107,0}]}, {name,"rebalance_107"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16670.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 107 [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16670.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16669.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_107_'ns_1@10.1.2.32'">>, <<"replication_building_107_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16669.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16668.0>,shutdown} [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',107}] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16686.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "ghij", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16686.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "ghijk") [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_107 - 107, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_107 - VBucket <107> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_108_'ns_1@10.1.2.33' - 108, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_108_'ns_1@10.1.2.32' - 108, backfill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16691.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"l"}, {checkpoints,[{108,0}]}, {name,"rebalance_108"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16691.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 108 [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16691.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16690.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_108_'ns_1@10.1.2.32'">>, <<"replication_building_108_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16690.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16689.0>,shutdown} [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',108}] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16707.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "ghijk", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16707.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "ghijkl") [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_108 - 108, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_108 - VBucket <108> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_109_'ns_1@10.1.2.32' - 109, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_109_'ns_1@10.1.2.33' - 109, backfill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16712.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"m"}, {checkpoints,[{109,0}]}, {name,"rebalance_109"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16712.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 109 [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16712.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16711.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_109_'ns_1@10.1.2.32'">>, <<"replication_building_109_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16711.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16710.0>,shutdown} [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',109}] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16728.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "ghijkl", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16728.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "ghijklm") [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_109 - 109, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_109 - VBucket <109> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
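The entries above repeat one fixed per-vbucket cycle: fill the future replicas over replication_building_<vb>_<node> tap streams, run a rebalance_<vb> takeover stream from the old master (which marks the vbucket dead), kill the builder taps, then register the replica change and restart the per-node replicator child with the vbucket appended to its id string ("ghijkl" -> "ghijklm"). A minimal sketch of that control flow follows; the module and helper names are hypothetical stand-ins for illustration, not ns_server's actual API (the real logic is spread across ns_rebalancer, ns_replicas_builder, ebucketmigrator_srv and cb_gen_vbm_sup):

-module(vbucket_move_sketch).
-export([move/3]).

%% One takeover cycle in the order it appears in the log above.
move(VBucket, NewMaster, Replicas) ->
    %% 1. "replication_building_<VB>_<node>" taps fill the future replicas.
    lists:foreach(fun(R) -> ok = build_replica(VBucket, R) end, Replicas),
    %% 2. "rebalance_<VB>" streams with {takeover,true}; the old master
    %%    marks the vbucket dead once the stream completes.
    ok = takeover(VBucket, NewMaster),
    %% 3. The builder taps are torn down (kill_a_bunch_of_tap_names).
    lists:foreach(fun(R) -> ok = kill_builder_tap(VBucket, R) end, Replicas),
    %% 4. The change is recorded, e.g. {add_replica,NewMaster,R,VBucket},
    %%    and the replicator child restarts with a grown vbucket list.
    [{add_replica, NewMaster, R, VBucket} || R <- Replicas].

%% Stubs so the sketch compiles; the real implementations live in ns_server.
build_replica(_VBucket, _Node) -> ok.
takeover(_VBucket, _Node) -> ok.
kill_builder_tap(_VBucket, _Node) -> ok.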
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_110_'ns_1@10.1.2.33' - 110, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_110_'ns_1@10.1.2.32' - 110, backfill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16733.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"n"}, {checkpoints,[{110,0}]}, {name,"rebalance_110"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16733.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 110 [rebalance:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16733.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16732.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_110_'ns_1@10.1.2.32'">>, <<"replication_building_110_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16732.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16731.0>,shutdown} [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',110}] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16749.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "ghijklm", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:<0.16749.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "ghijklmn") [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"]"}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\"}] [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,
219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] Passive: "]" Cleanup: "^" Replica: "+,-./0123VWXYZ[\\" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,95,97,98,99,100, 101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117, 118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, 135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151, 152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168, 169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185, 186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202, 203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219, 220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255]}, {passive,"]"}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\^"}] [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,95,97,98,99,100, 101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117, 118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, 135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151, 152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168, 169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185, 186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202, 203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219, 220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255] Passive: "]" Cleanup: "`" Replica: "+,-./0123VWXYZ[\\^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,101, 102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118, 119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135, 136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152, 153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169, 170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186, 187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,"]"}, {ignore,"_"}, {replica,"+,-./0123VWXYZ[\\^"}] [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,101,102, 
103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "]" Cleanup: "a" Replica: "+,-./0123VWXYZ[\\^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_110 - 110, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_110 - VBucket <110> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_111_'ns_1@10.1.2.32' - 111, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_111_'ns_1@10.1.2.33' - 111, backfill is completed with VBuckets [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"_"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:15] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "]" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16773.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"o"}, {checkpoints,[{111,0}]}, {name,"rebalance_111"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16773.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 111 [rebalance:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16773.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "_" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16753.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_111_'ns_1@10.1.2.32'">>, <<"replication_building_111_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16753.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16752.0>,shutdown} [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,98,99,100,102,103, 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137, 138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154, 155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171, 172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "e" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,99,100,103,104, 105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121, 122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138, 139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155, 156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172, 173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189, 190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240, 241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"b"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,99,100,103,104, 105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121, 122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138, 139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155, 156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172, 173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189, 190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240, 241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "f" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,99,100,104,105, 106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 
140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"b"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,99,100,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "g" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_111 - 111, backfill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_111 - VBucket <111> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,100,104,105,107, 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"bc"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,100,104,105,107, 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "j" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,100,104,105,108, 109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"bc"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,100,104,105,108, 109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 
177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "k" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,100,104,108,109, 110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126, 127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"bc"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,100,104,108,109, 110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126, 127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "i" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"bcd"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: "h" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"bc"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: "d" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"b"}, 
{replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: "c" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128, 129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: "b" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.33',111}] [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16914.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', {new_child_id, "ghijklmn", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16914.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.33', "ghijklmno") [ns_server:info] [2012-04-10 18:23:16] 
[ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_112_'ns_1@10.1.2.34' - 112, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_112_'ns_1@10.1.2.32' - 112, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16926.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"p"}, {checkpoints,[{112,0}]}, {name,"rebalance_112"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16926.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 112 [rebalance:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16926.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16918.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_112_'ns_1@10.1.2.32'">>, <<"replication_building_112_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16918.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16917.0>,shutdown} [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',112}] [ns_server:info] [2012-04-10 18:23:16] [ns_1@10.1.2.30:<0.16943.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "p") [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_112 - 112, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_112 - VBucket <112> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_113_'ns_1@10.1.2.34' - 113, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_113_'ns_1@10.1.2.32' - 113, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16947.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"q"}, {checkpoints,[{113,0}]}, {name,"rebalance_113"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16947.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 113 [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16947.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16946.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_113_'ns_1@10.1.2.32'">>, <<"replication_building_113_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16946.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16945.0>,shutdown} [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',113}] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16964.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "p", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16964.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pq") [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_113 - 113, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_113 - VBucket <113> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_114_'ns_1@10.1.2.34' - 114, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_114_'ns_1@10.1.2.32' - 114, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16968.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"r"}, {checkpoints,[{114,0}]}, {name,"rebalance_114"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16968.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 114 [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16968.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16967.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_114_'ns_1@10.1.2.32'">>, <<"replication_building_114_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16967.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16966.0>,shutdown} [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',114}] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16985.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "pq", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16985.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pqr") [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_114 - 114, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_114 - VBucket <114> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_115_'ns_1@10.1.2.32' - 115, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_115_'ns_1@10.1.2.34' - 115, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16989.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"s"}, {checkpoints,[{115,0}]}, {name,"rebalance_115"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16989.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 115 [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16989.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16988.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_115_'ns_1@10.1.2.32'">>, <<"replication_building_115_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.16988.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.16987.0>,shutdown} [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',115}] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17006.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "pqr", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17006.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pqrs") [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_115 - 115, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_115 - VBucket <115> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_116_'ns_1@10.1.2.32' - 116, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_116_'ns_1@10.1.2.34' - 116, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17010.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"t"}, {checkpoints,[{116,0}]}, {name,"rebalance_116"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17010.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 116 [rebalance:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17010.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17009.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_116_'ns_1@10.1.2.32'">>, <<"replication_building_116_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17009.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17008.0>,shutdown} [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',116}] [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17027.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "pqrs", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:17] [ns_1@10.1.2.30:<0.17027.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pqrst") [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_116 - 116, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_116 - VBucket <116> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_117_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 117, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_117_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 117, [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17038.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"u"}, {checkpoints,[{117,0}]}, {name,"rebalance_117"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17038.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 117 [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17038.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17030.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_117_'ns_1@10.1.2.32'">>, <<"replication_building_117_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17030.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17029.0>,shutdown} [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',117}] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17057.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "pqrst", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17057.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pqrstu") [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_117 - Backfill is completed with VBuckets 117, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_117 - VBucket <117> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_118_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 118, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_118_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 118, [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17061.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"v"}, {checkpoints,[{118,0}]}, {name,"rebalance_118"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17061.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 118 [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17061.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17060.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_118_'ns_1@10.1.2.32'">>, <<"replication_building_118_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17060.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17059.0>,shutdown} [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',118}] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17078.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "pqrstu", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17078.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pqrstuv") [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_118 - Backfill is completed with VBuckets 118, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_118 - VBucket <118> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_119_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 119, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_119_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 119, [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17082.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"w"}, {checkpoints,[{119,0}]}, {name,"rebalance_119"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17082.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 119 [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17082.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17081.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_119_'ns_1@10.1.2.32'">>, <<"replication_building_119_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17081.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17080.0>,shutdown} [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',119}] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17099.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "pqrstuv", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17099.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pqrstuvw") [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_119 - Backfill is completed with VBuckets 119, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_119 - VBucket <119> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_120_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 120, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_120_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 120, [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17103.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"x"}, {checkpoints,[{120,0}]}, {name,"rebalance_120"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17103.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 120 [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17103.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17102.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_120_'ns_1@10.1.2.32'">>, <<"replication_building_120_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17102.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17101.0>,shutdown} [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.34',120}] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17121.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', {new_child_id, "pqrstuvw", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17121.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.34', "pqrstuvwx") [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_120 - Backfill is completed with VBuckets 120, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_120 - VBucket <120> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_121_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 121, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_121_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 121, [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17132.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"y"}, {checkpoints,[{121,0}]}, {name,"rebalance_121"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17132.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 121 [rebalance:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17132.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17124.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_121_'ns_1@10.1.2.32'">>, <<"replication_building_121_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17124.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17123.0>,shutdown} [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',121}] [ns_server:info] [2012-04-10 18:23:18] [ns_1@10.1.2.30:<0.17150.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', "y") [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_121 - Backfill is completed with VBuckets 121, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_121 - VBucket <121> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_122_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 122, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_122_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 122, [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17154.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"z"}, {checkpoints,[{122,0}]}, {name,"rebalance_122"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17154.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 122 [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17154.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17152.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_122_'ns_1@10.1.2.32'">>, <<"replication_building_122_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17152.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17151.0>,shutdown} [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',122}] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17172.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', {new_child_id, "y", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17172.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', "yz") [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17175.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"{"}, {checkpoints,[{123,0}]}, {name,"rebalance_123"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17175.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 123 [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17175.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_122 - Backfill is completed with VBuckets 122, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_122 - VBucket <122> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_123_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 123, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_123_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 123, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_123 - Backfill is completed with VBuckets 123, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_123 - VBucket <123> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17174.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_123_'ns_1@10.1.2.32'">>, <<"replication_building_123_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17174.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17173.0>,shutdown} [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',123}] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17193.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', {new_child_id, "yz", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17193.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', "yz{") [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17196.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"|"}, {checkpoints,[{124,0}]}, {name,"rebalance_124"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17196.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 124 [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17196.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17195.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_124_'ns_1@10.1.2.32'">>, <<"replication_building_124_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17195.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17194.0>,shutdown} [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',124}] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17214.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', {new_child_id, "yz{", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17214.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', "yz{|") [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_124_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 124, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_124_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 124, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_124 - Backfill is completed with VBuckets 124, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_124 - VBucket <124> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17217.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"}"}, {checkpoints,[{125,0}]}, {name,"rebalance_125"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17217.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 125 [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17217.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17216.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_125_'ns_1@10.1.2.32'">>, <<"replication_building_125_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17216.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17215.0>,shutdown} [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',125}] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17235.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', {new_child_id, "yz{|", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17235.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', "yz{|}") [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_125_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 125, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_125_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 125, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_125 - Backfill is completed with VBuckets 125, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_125 - VBucket <125> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_126_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 126, [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17238.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"~"}, {checkpoints,[{126,0}]}, {name,"rebalance_126"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17238.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 126 [rebalance:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17238.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17237.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_126_'ns_1@10.1.2.32'">>, <<"replication_building_126_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17237.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17236.0>,shutdown} [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',126}] [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17256.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', {new_child_id, "yz{|}", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:<0.17256.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', "yz{|}~") [ns_server:info] [2012-04-10 18:23:19] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_126_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 126, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_126 - Backfill is completed with VBuckets 126, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_126 - VBucket <126> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17259.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[127]}, {checkpoints,[{127,0}]}, {name,"rebalance_127"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17259.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 127 [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17259.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17258.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_127_'ns_1@10.1.2.32'">>, <<"replication_building_127_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17258.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17257.0>,shutdown} [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',127}] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17277.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', {new_child_id, "yz{|}~", 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17277.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', [121, 122, 123, 124, 125, 126, 127]) [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_127_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 127, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_127_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 127, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_127 - Backfill is completed with VBuckets 127, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_127 - VBucket <127> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17280.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[128]}, {checkpoints,[{128,0}]}, {name,"rebalance_128"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17280.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 128 [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17280.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17279.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_128_'ns_1@10.1.2.32'">>, <<"replication_building_128_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17279.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17278.0>,shutdown} [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.32','ns_1@10.1.2.35',128}] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17298.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', {new_child_id, [121, 122, 123, 124, 125, 126, 127], 'ns_1@10.1.2.32'}) [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17298.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.32', 'ns_1@10.1.2.35', [121, 122, 123, 124, 125, 126, 127, 128]) [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_128_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 128, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_128_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 128, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_128 - Backfill is completed with VBuckets 128, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_128 - VBucket <128> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"lmnopqrstuv"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17301.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129]}, {checkpoints,[{129,0}]}, {name,"rebalance_129"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17301.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 129 [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17301.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17300.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_129_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17300.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17299.0>,shutdown} [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',129}] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17314.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129]) [error_logger:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17320.0>}, {name,{new_child_id,[129],'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129]}, {takeover,false}, 
{suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17320.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129]}, {checkpoints,[{129,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17320.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 129 [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_129_'ns_1@10.1.2.33' - 129, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_129 - 129, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_129 - VBucket <129> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17325.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[130]}, {checkpoints,[{130,0}]}, {name,"rebalance_130"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17325.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 130 [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17325.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17322.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_130_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17322.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17321.0>,shutdown} [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',130}] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17345.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', {new_child_id, [129], 'ns_1@10.1.2.33'}) [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17320.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17345.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130]) [error_logger:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17351.0>}, {name,{new_child_id,[129,130],'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129,130]}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17351.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129,130]}, {checkpoints,[{129,0},{130,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:<0.17351.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 130 [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"lnopqrstuv"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "m" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_130_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 130, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_130 - Backfill is completed with VBuckets 130, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_130 - VBucket <130> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"nopqrstuv"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "l" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"noprstuv"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 
242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "q" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"norstuv"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "p" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:20] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_131_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 131, [rebalance:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17374.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[131]}, {checkpoints,[{131,0}]}, {name,"rebalance_131"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17374.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 131 [rebalance:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17374.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17353.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_131_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17353.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17352.0>,shutdown} [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,
174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"orstuv"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "n" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"rstuv"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "o" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 
140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"rstu"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "v" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_131 - Backfill is completed with VBuckets 131, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_131 - VBucket <131> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"rsu"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "t" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"rs"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,119,120,121,122, 123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139, 140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 
242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "u" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,120,121,122,123, 124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140, 141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157, 158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, 175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208, 209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, 226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"rs"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,120,121,122,123, 124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140, 141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157, 158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, 175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208, 209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, 226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "w" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"rs"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 
227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "x" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"r"}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "s" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141, 142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "r" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,130,131,132,133,134,135,136,137,138,139,140,141,142, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,121,122,123,124, 125,126,127,128,130,131,132,133,134,135,136,137,138,139,140,141,142, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [129] Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',131}] [ns_server:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17502.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', {new_child_id, [129, 130], 'ns_1@10.1.2.33'}) [rebalance:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17351.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
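A note on reading the capi_set_view_manager entries above and below: fields such as Cleanup: "u" and Replica: "+,-./0123VWXYZ[\\]^" are not corruption. Erlang's formatter prints a list of integers as a double-quoted string whenever every element is a printable character code, so "u" is the vbucket list [117] and the Replica string decodes to vbuckets 43-51 and 86-94 (matching the explicit [43,...,94,129] lists that appear later in this log). A minimal Python sketch for decoding such fields when reading this capture (the function name is mine, not part of ns_server):

def decode_vbucket_string(s):
    # Erlang prints [117] as "u" and [43,...,94] as "+,-./0123VWXYZ[\\]^";
    # taking ord() of each character recovers the vbucket ids.
    return [ord(c) for c in s]

print(decode_vbucket_string("u"))                    # [117]
print(decode_vbucket_string("+,-./0123VWXYZ[\\]^"))  # [43, 44, ..., 51, 86, ..., 94]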
[ns_server:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17502.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131]) [error_logger:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17508.0>}, {name,{new_child_id,[129,130,131],'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129,130,131]}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,122,123,124,125, 126,127,128,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,122,123,124,125, 126,127,128,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "y" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17508.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129,130,131]}, {checkpoints,[{129,0},{130,0},{131,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:<0.17508.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 131 [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,123,124,125,126, 127,128,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 
196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,123,124,125,126, 127,128,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "z" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,124,125,126,127, 128,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,124,125,126,127, 128,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: "{" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [views:info] [2012-04-10 18:23:21] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,125,126,127,128, 130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 
198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "|" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17532.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[132]}, {checkpoints,[{132,0}]}, {name,"rebalance_132"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17532.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 132 [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17532.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17510.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_132_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17510.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17509.0>,shutdown} [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_132_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 132, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_132 - Backfill is completed with VBuckets 132, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_132 - VBucket <132> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [views:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,126,127,128,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,"+,-./0123VWXYZ[\\]^"}] [views:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,126,127,128,130, 131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255] Passive: [] Cleanup: "}" Replica: "+,-./0123VWXYZ[\\]^" ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',132}] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17572.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', {new_child_id, [129, 130, 131], 'ns_1@10.1.2.33'}) [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17508.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
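Each vbucket move in this log follows the same cycle: a single-vbucket tap stream named rebalance_N is opened with {takeover,true}, memcached backfills it and flips the vbucket dead, ns_server records {add_replica,Dst,Src,N}, and cb_gen_vbm_sup kills and restarts the per-destination replicator child with its replica list grown by one. A rough Python sketch (regex and function names are mine) that recovers the per-pair replica sets from a capture like this one:

import re
from collections import defaultdict

ADD_REPLICA = re.compile(r"\{add_replica,'([^']+)','([^']+)',(\d+)\}")

def replica_sets(log_text):
    # Accumulate the vbuckets granted to each (dst, src) pair by the
    # cb_gen_vbm_sup "Applying changes" entries.
    sets = defaultdict(list)
    for dst, src, vb in ADD_REPLICA.findall(log_text):
        sets[(dst, src)].append(int(vb))
    return dict(sets)

# e.g. {('ns_1@10.1.2.33', 'ns_1@10.1.2.30'): [131, 132, 133, ...], ...}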
[ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17572.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131, 132]) [error_logger:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17578.0>}, {name, {new_child_id,[129,130,131,132],'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129,130,131,132]}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17578.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129,130,131,132]}, {checkpoints,[{129,0},{130,0},{131,0},{132,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17578.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 132 [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17585.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[133]}, {checkpoints,[{133,0}]}, {name,"rebalance_133"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17585.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 133 [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17585.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17580.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_133_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17580.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17579.0>,shutdown} [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',133}] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17598.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', {new_child_id, [129, 130, 131, 132], 'ns_1@10.1.2.33'}) [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17578.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17598.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131, 132, 133]) [error_logger:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17604.0>}, {name, {new_child_id, [129,130,131,132,133], 'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129,130,131,132,133]}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17604.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129,130,131,132,133]}, {checkpoints,[{129,0},{130,0},{131,0},{132,0},{133,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17604.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 133 [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_133_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 133, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_133 - Backfill is completed with VBuckets 133, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_133 - VBucket <133> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_134_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 134, [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17608.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[134]}, {checkpoints,[{134,0}]}, {name,"rebalance_134"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17608.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 134 [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17608.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17606.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_134_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17606.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17605.0>,shutdown} [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',134}] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17621.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', {new_child_id, [129, 130, 131, 132, 133], 'ns_1@10.1.2.33'}) [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17604.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
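The PROGRESS REPORT entries show why the replicator churns: the supervisor child id {new_child_id, VBuckets, DstNode} embeds the full vbucket list, so granting one more replica vbucket means a kill_child followed by a start_child with the whole list, and the restarted ebucketmigrator_srv re-opens its tap stream with every checkpoint at 0. A small sketch (hypothetical names; the regex assumes the exact start_child layout seen in this log) that pulls out the growing lists to make that churn visible:

import re

START_CHILD = re.compile(
    r'''start_child\(ns_vbm_new_sup, "default", '([^']+)', '([^']+)', \[([\d, ]+)\]\)''')

def restart_churn(log_text):
    # Each start_child restarts the replicator with its full vbucket list,
    # so the per-destination lists grow by one vbucket per takeover.
    return [(dst, src, [int(v) for v in vbs.split(",")])
            for dst, src, vbs in START_CHILD.findall(log_text)]

# e.g. [('ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131]),
#       ('ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131, 132]), ...]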
[ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17621.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131, 132, 133, 134]) [error_logger:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17627.0>}, {name, {new_child_id, [129,130,131,132,133,134], 'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129,130,131,132,133,134]}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17627.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129,130,131,132,133,134]}, {checkpoints,[{129,0},{130,0},{131,0},{132,0},{133,0},{134,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17627.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 134 [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state rebalancing: {rebalancing_state,<0.13296.0>, {dict,6,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[], [],[],[],[]}, {{[['ns_1@10.1.2.30'| 0.5953177257525084]], [['ns_1@10.1.2.31'| 0.9767441860465116]], [['ns_1@10.1.2.32'| 0.9767441860465116]], [['ns_1@10.1.2.33'| 0.13953488372093026]], [['ns_1@10.1.2.34'|0.0]], [['ns_1@10.1.2.35'|0.0]], [],[],[],[],[],[],[],[],[],[]}}}} [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17632.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[135]}, {checkpoints,[{135,0}]}, {name,"rebalance_135"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17632.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 135 [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17632.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17629.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_135_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17629.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17628.0>,shutdown} [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_134 - Backfill is completed with VBuckets 134, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_134 - VBucket <134> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_135_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 135, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_135 - Backfill is completed with VBuckets 135, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_135 - VBucket <135> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',135}] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17652.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', {new_child_id, [129, 130, 131, 132, 133, 134], 'ns_1@10.1.2.33'}) [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17627.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17652.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131, 132, 133, 134, 135]) [error_logger:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17658.0>}, {name, {new_child_id, [129,130,131,132,133,134,135], 'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129,130,131,132,133,134,135]}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17658.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129,130,131,132,133,134,135]}, {checkpoints,[{129,0},{130,0},{131,0},{132,0},{133,0},{134,0},{135,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17658.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 135 [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17662.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[136]}, {checkpoints,[{136,0}]}, {name,"rebalance_136"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17662.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 136 [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17662.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17660.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_136_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17660.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17659.0>,shutdown} [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.30',136}] [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17675.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', {new_child_id, [129, 130, 131, 132, 133, 134, 135], 'ns_1@10.1.2.33'}) [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17658.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
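The [rebalance:info] timestamps also make it easy to estimate move throughput: each single-vbucket stream with {takeover,true} marks the start of one transfer, and in this capture vbuckets 132 through 136 all complete within the same second. A fragile but workable sketch (the regex assumes the exact entry layout seen here) that counts takeover streams started per second:

import re
from collections import Counter

TAKEOVER = re.compile(
    r'\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\] '
    r'\[[^\]]*ebucketmigrator_srv:init:\d+\] Starting tap stream: '
    r'\[\{vbuckets,\[(\d+)\]\}, \{checkpoints,\[\{\d+,\d+\}\]\}, '
    r'\{name,"rebalance_\d+"\}, \{takeover,true\}\]')

def takeover_rate(log_text):
    # Count takeover streams started per second; this capture shows
    # several vbucket takeovers completing within each second.
    return Counter(ts for ts, _vb in TAKEOVER.findall(log_text))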
[ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17675.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.30', [129, 130, 131, 132, 133, 134, 135, 136]) [error_logger:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.17688.0>}, {name, {new_child_id, [129,130,131,132,133,134,135,136], 'ns_1@10.1.2.33'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.33",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,[129,130,131,132,133,134,135,136]}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17688.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[129,130,131,132,133,134,135,136]}, {checkpoints,[{129,0}, {130,0}, {131,0}, {132,0}, {133,0}, {134,0}, {135,0}, {136,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:<0.17688.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 136 [ns_server:info] [2012-04-10 18:23:22] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_136_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 136, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_136 - Backfill is completed with VBuckets 136, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_136 - VBucket <136> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_137_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 137, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_137_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 137, [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17694.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[137]}, {checkpoints,[{137,0}]}, {name,"rebalance_137"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17694.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 137 [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17694.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17690.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_137_'ns_1@10.1.2.33'">>, <<"replication_building_137_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17690.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17689.0>,shutdown} [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',137}] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17708.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137]) [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17715.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[138]}, {checkpoints,[{138,0}]}, {name,"rebalance_138"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:23]
[ns_1@10.1.2.30:<0.17715.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 138 [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_137 - Backfill is completed with VBuckets 137, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_137 - VBucket <137> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_138_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 138, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_138_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 138, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_138 - Backfill is completed with VBuckets 138, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_138 - VBucket <138> is going dead to complete vbucket takeover. [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17715.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17714.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_138_'ns_1@10.1.2.33'">>, <<"replication_building_138_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17714.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17713.0>,shutdown} [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',138}] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17729.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17729.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138]) [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17736.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[139]}, {checkpoints,[{139,0}]}, {name,"rebalance_139"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17736.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 139 [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17736.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17735.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_139_'ns_1@10.1.2.33'">>, <<"replication_building_139_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_139_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 139, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_139_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 139, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_139 - Backfill is completed with VBuckets 139, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_139 - VBucket <139> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17735.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17734.0>,shutdown} [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',139}] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17750.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137, 138], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17750.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138, 139]) [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17757.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[140]}, {checkpoints,[{140,0}]}, {name,"rebalance_140"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17757.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 140 [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17757.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17756.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_140_'ns_1@10.1.2.33'">>, <<"replication_building_140_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17756.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17755.0>,shutdown} [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',140}] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17771.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137, 138, 139], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17771.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138, 139, 140]) [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_140_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 140, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_140_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 140, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_140 - Backfill is completed with VBuckets 140, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_140 - VBucket <140> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17778.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[141]}, {checkpoints,[{141,0}]}, {name,"rebalance_141"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17778.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 141 [rebalance:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17778.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17777.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_141_'ns_1@10.1.2.33'">>, <<"replication_building_141_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17777.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17776.0>,shutdown} [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',141}] [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17792.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137, 138, 139, 140], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:<0.17792.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138, 139, 140, 141]) [ns_server:info] [2012-04-10 18:23:23] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_141_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 141, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_141_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 141, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_141 - Backfill is completed with VBuckets 141, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_141 - VBucket <141> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17799.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[142]}, {checkpoints,[{142,0}]}, {name,"rebalance_142"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17799.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 142 [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17799.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17798.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_142_'ns_1@10.1.2.33'">>, <<"replication_building_142_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17798.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17797.0>,shutdown} [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',142}] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17813.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137, 138, 139, 140, 141], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17813.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138, 139, 140, 141, 142]) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_142_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 142, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_142_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 142, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_142 - Backfill is completed with VBuckets 142, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_142 - VBucket <142> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_143_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 143, [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17820.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[143]}, {checkpoints,[{143,0}]}, {name,"rebalance_143"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17820.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 143 [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17820.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17819.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_143_'ns_1@10.1.2.33'">>, <<"replication_building_143_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17819.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17818.0>,shutdown} [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',143}] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17834.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137, 138, 139, 140, 141, 142], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17834.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138, 139, 140, 141, 142, 143]) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_143_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 143, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_143 - Backfill is completed with VBuckets 143, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_143 - VBucket <143> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17841.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[144]}, {checkpoints,[{144,0}]}, {name,"rebalance_144"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17841.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 144 [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17841.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17840.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_144_'ns_1@10.1.2.33'">>, <<"replication_building_144_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17840.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17839.0>,shutdown} [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',144}] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17855.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137, 138, 139, 140, 141, 142, 143], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17855.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138, 139, 140, 141, 142, 143, 144]) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_144_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 144, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_144_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 144, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_144 - Backfill is completed with VBuckets 144, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_144 - VBucket <144> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_145_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 145, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_145_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 145, [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17863.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[145]}, {checkpoints,[{145,0}]}, {name,"rebalance_145"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17863.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 145 [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17863.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17861.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_145_'ns_1@10.1.2.33'">>, <<"replication_building_145_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17861.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17860.0>,shutdown} [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.31',145}] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17884.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', {new_child_id, [137, 138, 139, 140, 141, 142, 143, 144], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17884.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.31', [137, 138, 139, 140, 141, 142, 143, 144, 145]) [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_145 - Backfill is completed with VBuckets 145, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_145 - VBucket <145> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_146_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 146, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_146_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 146, [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17891.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[146]}, {checkpoints,[{146,0}]}, {name,"rebalance_146"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17891.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 146 [rebalance:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17891.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17890.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_146_'ns_1@10.1.2.33'">>, <<"replication_building_146_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17890.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17889.0>,shutdown} [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',146}] [ns_server:info] [2012-04-10 18:23:24] [ns_1@10.1.2.30:<0.17906.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146]) [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17913.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[147]}, {checkpoints,[{147,0}]}, {name,"rebalance_147"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17913.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 147 [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17913.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_146 - Backfill is completed with VBuckets 146, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_146 - VBucket <146> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_147_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 147, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_147_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 147, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_147 - Backfill is completed with VBuckets 147, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_147 - VBucket <147> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17911.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_147_'ns_1@10.1.2.33'">>, <<"replication_building_147_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17911.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17910.0>,shutdown} [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',147}] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17928.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17928.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147]) [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17934.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[148]}, {checkpoints,[{148,0}]}, {name,"rebalance_148"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17934.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 148 [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17934.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17933.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_148_'ns_1@10.1.2.33'">>, <<"replication_building_148_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17933.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17932.0>,shutdown} [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',148}] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17949.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146, 147], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17949.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147, 148]) [views:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[130]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129]}] [views:info] [2012-04-10 18:23:25]
[ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [130] Cleanup: [] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_148_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 148, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_148_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 148, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_148 - Backfill is completed with VBuckets 148, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_148 - VBucket <148> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17955.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[149]}, {checkpoints,[{149,0}]}, {name,"rebalance_149"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17955.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 149 [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17955.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17954.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_149_'ns_1@10.1.2.33'">>, <<"replication_building_149_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17954.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17953.0>,shutdown} [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',149}] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17970.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146, 147, 148], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17970.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147, 148, 149]) [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_149_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 149, memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_149_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 149, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_149 - Backfill is completed with VBuckets 149, memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_149 - VBucket <149> is going dead to complete vbucket
takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17976.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[150]}, {checkpoints,[{150,0}]}, {name,"rebalance_150"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17976.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 150 [rebalance:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17976.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17975.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_150_'ns_1@10.1.2.33'">>, <<"replication_building_150_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17975.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17974.0>,shutdown} [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',150}] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17991.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146, 147, 148, 149], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:<0.17991.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147, 148, 149, 150]) [views:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,133,134,135,136, 137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153, 154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170, 171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187, 188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204, 205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[130]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129]}] [views:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,133,134,135,136, 137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153, 154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170, 171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187, 188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204, 205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [130] Cleanup: [132] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) 
eq_tapq:replication_building_150_'ns_1@10.1.2.33' - 150, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_150_'ns_1@10.1.2.32' - 150, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_150 - 150, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_150 - VBucket <150> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [views:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,134,135,136,137, 138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154, 155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171, 172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[130]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129]}] [views:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,134,135,136,137, 138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154, 155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171, 172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [130] Cleanup: [133] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,134,135,136,137, 138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154, 155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171, 172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:25] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,134,135,136,137, 138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154, 
155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171, 172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [130] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155, 156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172, 173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189, 190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240, 241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [ns_server:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_151_'ns_1@10.1.2.32' - 151, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_151_'ns_1@10.1.2.33' - 151, ill is completed with VBuckets [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155, 156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172, 173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189, 190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240, 241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [134] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:<0.18021.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[151]}, {checkpoints,[{151,0}]}, {name,"rebalance_151"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:<0.18021.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 151 [rebalance:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:<0.18021.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 139,140,141,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 
157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [ns_server:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:<0.17996.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_151_'ns_1@10.1.2.33'">>, <<"replication_building_151_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:<0.17996.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.17995.0>,shutdown} [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 139,140,141,143,144,145,146,147,148,149,150,151,152,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [142] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 140,141,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157, 158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, 175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208, 209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, 226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 140,141,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157, 158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, 175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208, 209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, 226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [139] Replica: 
[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 141,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,138, 141,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [140] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_151 - 151, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_151 - VBucket <151> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
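Note: the apply_map / "Classified vbuckets" pairs above and below can be read as plain set arithmetic between consecutive maps: a vbucket that leaves the active (or passive) set shows up exactly once under Cleanup so the set view can purge it. For example, 132 drops out of the active list and the next entry logs Cleanup: [132]; likewise 130 moves from passive to replica and appears once as Cleanup: [130]. A minimal sketch of that bookkeeping, assuming the classification really is just a diff of consecutive maps (classify_sketch and its names are illustrative, not the actual capi_set_view_manager code):

%% classify_sketch.erl - illustrative only; the real logic lives in
%% ns_server's capi_set_view_manager. Assumes classification is plain
%% set difference between consecutive maps, which matches the log:
%% a vbucket that leaves Active reappears exactly once under Cleanup.
-module(classify_sketch).
-export([classify/2]).

%% Old and New are proplists shaped like the logged map:
%% [{active, [...]}, {passive, [...]}, {replica, [...]}]
classify(Old, New) ->
    Get = fun(Key, Map) -> proplists:get_value(Key, Map, []) end,
    Kept = ordsets:from_list(Get(active, New) ++ Get(passive, New)),
    WasIndexed = ordsets:from_list(Get(active, Old) ++ Get(passive, Old)),
    %% anything that was being indexed but no longer is must be purged
    %% from the set view -> the "Cleanup" list in the log
    Cleanup = ordsets:subtract(WasIndexed, Kept),
    ReplicaCleanup = ordsets:subtract(ordsets:from_list(Get(replica, Old)),
                                      ordsets:from_list(Get(replica, New))),
    [{active, Get(active, New)},
     {passive, Get(passive, New)},
     {cleanup, Cleanup},
     {replica, Get(replica, New)},
     {replica_cleanup, ReplicaCleanup}].

For instance, classify([{active,[131,132]},{passive,[130]},{replica,[]}], [{active,[131]},{passive,[]},{replica,[130]}]) yields {cleanup,[130,132]} alongside the new active and replica lists, the same shape as the entries in this capture.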
Disconnecting tap stream [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,141, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,136,137,141, 143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [138] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,137,141,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,137,141,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [136] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 
18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,137,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,137,143,144, 145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [141] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,135,143,144,145, 146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: [137] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] 
[ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,127,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [135] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,128,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [127] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket 
default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[126,131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [128] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,143,144,145,146, 147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: "~" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,[131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,144,145,146,147, 148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255] Passive: [] Cleanup: [143] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255]}, {passive,[]}, {ignore,[131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:26] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,145,146,147,148, 149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: [144] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,146,147,148,149, 150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166, 
167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183, 184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255]}, {passive,[]}, {ignore,[131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,146,147,148,149, 150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166, 167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183, 184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255] Passive: [] Cleanup: [145] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',151}] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18196.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146, 147, 148, 149, 150], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18196.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147, 148, 149, 150, 151]) [views:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,147,148,149,150, 151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167, 168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255]}, {passive,[]}, {ignore,[131]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130]}] [views:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,147,148,149,150, 151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167, 168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] Passive: [] Cleanup: [146] Replica: 
[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130] ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18212.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[152]}, {checkpoints,[{152,0}]}, {name,"rebalance_152"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18212.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 152 [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18212.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18201.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_152_'ns_1@10.1.2.33'">>, <<"replication_building_152_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18201.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18200.0>,shutdown} [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',152}] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18227.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146, 147, 148, 149, 150, 151], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18227.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147, 148, 149, 150, 151, 152]) [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_152_'ns_1@10.1.2.32' - 152, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_152_'ns_1@10.1.2.33' - 152, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_152 - 152, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_152 - VBucket <152> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
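Note: each move in this stretch opens a dedicated one-vbucket TAP stream named rebalance_<vb>, starting from checkpoint 0 and flagged takeover, which is why every cycle ends with the vbucket "going dead" on the old master once the stream drains. A sketch of that option list as ebucketmigrator_srv logs it at init (the helper below is illustrative, not part of ns_server):

%% tap_opts_sketch.erl - the option list the migrator logs at init for a
%% single-vbucket takeover move. Illustrative only.
-module(tap_opts_sketch).
-export([takeover_opts/1]).

takeover_opts(VBucket) ->
    [{vbuckets, [VBucket]},
     %% checkpoint 0: replay everything the current master holds
     {checkpoints, [{VBucket, 0}]},
     {name, "rebalance_" ++ integer_to_list(VBucket)},
     %% takeover: the receiver becomes master once the stream drains,
     %% after which the old master's copy goes dead
     {takeover, true}].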
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_153_'ns_1@10.1.2.33' - 153, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_153_'ns_1@10.1.2.32' - 153, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18233.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[153]}, {checkpoints,[{153,0}]}, {name,"rebalance_153"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18233.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 153 [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18233.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18232.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_153_'ns_1@10.1.2.33'">>, <<"replication_building_153_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18232.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18231.0>,shutdown} [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',153}] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18248.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146, 147, 148, 149, 150, 151, 152], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18248.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147, 148, 149, 150, 151, 152, 153]) [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18254.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[154]}, {checkpoints,[{154,0}]}, {name,"rebalance_154"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18254.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 154 [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18254.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_153 - 153, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_153 - VBucket <153> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_154_'ns_1@10.1.2.32' - 154, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_154_'ns_1@10.1.2.33' - 154, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_154 - 154, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_154 - VBucket <154> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
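Note: the paired kill_child/start_child lines exist because an OTP supervisor cannot rewrite a running child's spec in place. After every {add_replica,Src,Dst,VB} change, cb_gen_vbm_sup stops the replicator whose id carries the old vbucket list and starts one carrying the list extended by VB, which is why the logged lists grow [146] -> [146, 147] -> [146, 147, 148] and so on. A toy model of that bookkeeping under that assumption (vbm_children_sketch is illustrative, not the real module):

%% vbm_children_sketch.erl - toy model of the kill_child/start_child
%% bookkeeping above; not the real cb_gen_vbm_sup API.
-module(vbm_children_sketch).
-export([apply_changes/2]).

%% Children maps {SrcNode, DstNode} to the sorted vbucket list owned by
%% that replicator child, mirroring {new_child_id, VBuckets, SrcNode}.
apply_changes(Changes, Children) ->
    lists:foldl(fun apply_change/2, Children, Changes).

apply_change({add_replica, Src, Dst, VB}, Children) ->
    Key = {Src, Dst},
    OldVBs = maps:get(Key, Children, []),
    NewVBs = lists:usort([VB | OldVBs]),
    %% the real supervisor kills the child registered for OldVBs (if any)
    %% and starts a fresh one for NewVBs; here we only track the result
    Children#{Key => NewVBs}.

Folding the changes for vbuckets 146 through 153 over an empty map reproduces the progression above; the very first change for a pair, like {add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',155} just below, finds no existing child, which matches the lone start_child logged without a preceding kill_child.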
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18253.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_154_'ns_1@10.1.2.33'">>, <<"replication_building_154_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18253.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18252.0>,shutdown} [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.32',154}] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18269.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', {new_child_id, [146, 147, 148, 149, 150, 151, 152, 153], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18269.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.32', [146, 147, 148, 149, 150, 151, 152, 153, 154]) [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18275.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[155]}, {checkpoints,[{155,0}]}, {name,"rebalance_155"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18275.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 155 [rebalance:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18275.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18274.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_155_'ns_1@10.1.2.33'">>, <<"replication_building_155_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18274.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18273.0>,shutdown} [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',155}] [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:<0.18292.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155]) [ns_server:info] [2012-04-10 18:23:27] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_155_'ns_1@10.1.2.34' - 155, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_155_'ns_1@10.1.2.33' - 155, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_155 - 155, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_155 - VBucket <155> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_156_'ns_1@10.1.2.33' - 156, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18307.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[156]}, {checkpoints,[{156,0}]}, {name,"rebalance_156"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18307.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 156 [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18307.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18302.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_156_'ns_1@10.1.2.33'">>, <<"replication_building_156_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18302.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18301.0>,shutdown} [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',156}] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18324.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', {new_child_id, [155], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18324.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155, 156]) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_156_'ns_1@10.1.2.34' - 156, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_156 - 156, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_156 - VBucket <156> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
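Note: the binaries reported by kill_a_bunch_of_tap_names all follow one naming scheme, replication_building_<VBucket>_'<DstNode>', registered on the source node while the replica builder streams the vbucket out and torn down once the backfill finishes. A formatting sketch that reproduces those names (illustrative only, not how ns_server builds them):

%% tap_names_sketch.erl - reproduces the tap names shown in the
%% kill_a_bunch_of_tap_names entries. Illustrative formatting only.
-module(tap_names_sketch).
-export([building_names/2]).

%% building_names(156, ['ns_1@10.1.2.33', 'ns_1@10.1.2.34']) ->
%%     [<<"replication_building_156_'ns_1@10.1.2.33'">>,
%%      <<"replication_building_156_'ns_1@10.1.2.34'">>]
building_names(VBucket, DstNodes) ->
    [iolist_to_binary(io_lib:format("replication_building_~B_'~s'",
                                    [VBucket, Node]))
     || Node <- DstNodes].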
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_157_'ns_1@10.1.2.34' - 157, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_157_'ns_1@10.1.2.33' - 157, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18328.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[157]}, {checkpoints,[{157,0}]}, {name,"rebalance_157"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18328.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 157 [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18328.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18327.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_157_'ns_1@10.1.2.33'">>, <<"replication_building_157_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18327.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18326.0>,shutdown} [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',157}] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18345.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', {new_child_id, [155, 156], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18345.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155, 156, 157]) [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18349.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[158]}, {checkpoints,[{158,0}]}, {name,"rebalance_158"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18349.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 158 [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18349.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18348.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_158_'ns_1@10.1.2.33'">>, <<"replication_building_158_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18348.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18347.0>,shutdown} [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_157 - 157, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_157 - VBucket <157> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_158_'ns_1@10.1.2.33' - 158, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_158_'ns_1@10.1.2.34' - 158, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_158 - 158, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_158 - VBucket <158> is going dead to complete vbucket takeover. 
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',158}] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18366.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', {new_child_id, [155, 156, 157], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18366.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155, 156, 157, 158]) [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18370.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,[159]}, {checkpoints,[{159,0}]}, {name,"rebalance_159"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18370.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 159 [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18370.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18369.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_159_'ns_1@10.1.2.33'">>, <<"replication_building_159_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18369.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18368.0>,shutdown} [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',159}] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18395.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', {new_child_id, [155, 156, 157, 158], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18395.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155, 156, 157, 158, 159]) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_159_'ns_1@10.1.2.33' - 159, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_159_'ns_1@10.1.2.34' - 159, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_159 - 159, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_159 - VBucket <159> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_160_'ns_1@10.1.2.33' - 160, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18399.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets," "}, {checkpoints,[{160,0}]}, {name,"rebalance_160"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18399.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 160 [rebalance:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18399.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18398.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_160_'ns_1@10.1.2.33'">>, <<"replication_building_160_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18398.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18397.0>,shutdown} [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',160}] [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18416.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', {new_child_id, [155, 156, 157, 158, 159], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:<0.18416.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155, 156, 157, 158, 159, 160]) [ns_server:info] [2012-04-10 18:23:28] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_160_'ns_1@10.1.2.34' - 160, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_160 - 160, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_160 - VBucket <160> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18420.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¡"}, {checkpoints,[{161,0}]}, {name,"rebalance_161"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18420.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 161 [rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18420.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18419.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_161_'ns_1@10.1.2.33'">>, <<"replication_building_161_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18419.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18418.0>,shutdown} [ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',161}] [ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18437.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', {new_child_id, [155, 156, 157, 158, 159, 160], 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18437.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155, 156, 157, 158, 159, 160, 161]) [ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_161_'ns_1@10.1.2.33' - 161, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_161_'ns_1@10.1.2.34' - 161, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_161 - 161, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_161 - VBucket <161> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
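Note: from vbucket 160 onward, entries such as {vbuckets," "} and {vbuckets,"¡"}, the earlier Cleanup: "~", and start_child(..., "£") are not corruption. Erlang's ~p format prints a list of printable Latin-1 integers as a string, so [160] renders as a non-breaking space, [161] as "¡", [163] as "£", and [126] as "~"; the adjacent checkpoints tuples ({checkpoints,[{161,0}]} and so on) confirm the intended numbers. A quick demonstration, runnable with escript:

%% string_lists_sketch.erl - why the log renders small vbucket lists as
%% strings. Run with: escript string_lists_sketch.erl
main(_) ->
    %% ~p prints a list of printable Latin-1 integers as a string,
    %% so [161] comes out as "¡" and [126] as "~"
    io:format("~p~n", [{vbuckets, [161]}]),
    io:format("~p~n", [[126]]),
    %% ~w never guesses and always prints the raw term
    io:format("~w~n", [{vbuckets, [161]}]).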
Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_162_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 162,
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18441.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¢"}, {checkpoints,[{162,0}]}, {name,"rebalance_162"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18441.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 162
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18441.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18440.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_162_'ns_1@10.1.2.33'">>, <<"replication_building_162_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18440.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18439.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.34',162}]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18458.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', {new_child_id, [155, 156, 157, 158, 159, 160, 161], 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18458.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.34', [155, 156, 157, 158, 159, 160, 161, 162])
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_162_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 162,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_162 - Backfill is completed with VBuckets 162,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_162 - VBucket <162> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_163_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 163,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_163_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 163,
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18462.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"£"}, {checkpoints,[{163,0}]}, {name,"rebalance_163"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18462.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 163
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18462.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18461.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_163_'ns_1@10.1.2.33'">>, <<"replication_building_163_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18461.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18460.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',163}]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18480.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£")
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18483.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¤"}, {checkpoints,[{164,0}]}, {name,"rebalance_164"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18483.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 164
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18483.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18482.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_164_'ns_1@10.1.2.33'">>, <<"replication_building_164_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18482.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18481.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_163 - Backfill is completed with VBuckets 163,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_163 - VBucket <163> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_164_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 164,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_164_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 164,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_164 - Backfill is completed with VBuckets 164,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_164 - VBucket <164> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',164}]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18501.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£", 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18501.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤")
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18504.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¥"}, {checkpoints,[{165,0}]}, {name,"rebalance_165"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18504.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 165
[rebalance:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18504.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18503.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_165_'ns_1@10.1.2.33'">>, <<"replication_building_165_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18503.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18502.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',165}]
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18522.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£¤", 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:<0.18522.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤¥")
[ns_server:info] [2012-04-10 18:23:29] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_165_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 165,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_165_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 165,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_165 - Backfill is completed with VBuckets 165,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_165 - VBucket <165> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18525.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¦"}, {checkpoints,[{166,0}]}, {name,"rebalance_166"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18525.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 166
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18525.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18524.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_166_'ns_1@10.1.2.33'">>, <<"replication_building_166_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18524.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18523.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',166}]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18543.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£¤¥", 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18543.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤¥¦")
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_166_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 166,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_166_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 166,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_166 - Backfill is completed with VBuckets 166,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_166 - VBucket <166> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18546.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"§"}, {checkpoints,[{167,0}]}, {name,"rebalance_167"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18546.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 167
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18546.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18545.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_167_'ns_1@10.1.2.33'">>, <<"replication_building_167_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18545.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18544.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',167}]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18564.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£¤¥¦", 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18564.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤¥¦§")
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_167_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 167,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_167_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 167,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_167 - Backfill is completed with VBuckets 167,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_167 - VBucket <167> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
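Editor's note: strings such as "£¤¥¦§" in the kill_child/start_child arguments are not corruption. Erlang prints a list of integers as a string whenever every element is a printable character code, and vbucket ids in this range are printable Latin-1 code points (163 is £, 164 is ¤, and so on; 160 is a non-breaking space, which is why vbucket 160 appeared as {vbuckets," "} earlier). A shell session showing the behaviour, assuming a shell whose printable range includes Latin-1:

    1> [163,164,165,166,167].
    "£¤¥¦§"
    2> "£¤¥¦§" =:= [163,164,165,166,167].
    true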
Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_168_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 168,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_168_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 168,
[views:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[147,148,149,150,151]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]}]
[views:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: [131]
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]
ReplicaCleanup: []
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18567.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¨"}, {checkpoints,[{168,0}]}, {name,"rebalance_168"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18567.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 168
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18567.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18566.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_168_'ns_1@10.1.2.33'">>, <<"replication_building_168_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18566.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18565.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',168}]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18585.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£¤¥¦§", 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18585.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤¥¦§¨")
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18588.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"©"}, {checkpoints,[{169,0}]}, {name,"rebalance_169"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18588.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 169
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18588.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_168 - Backfill is completed with VBuckets 168,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_168 - VBucket <168> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_169_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 169,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_169_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 169,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_169 - Backfill is completed with VBuckets 169,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_169 - VBucket <169> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18587.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_169_'ns_1@10.1.2.33'">>, <<"replication_building_169_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18587.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18586.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',169}]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18614.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£¤¥¦§¨", 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18614.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤¥¦§¨©")
[views:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[147,149,150,151]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]}]
[views:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: [148]
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]
ReplicaCleanup: []
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18618.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"ª"}, {checkpoints,[{170,0}]}, {name,"rebalance_170"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18618.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 170
[rebalance:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18618.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18616.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_170_'ns_1@10.1.2.33'">>, <<"replication_building_170_'ns_1@10.1.2.35'">>]
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:<0.18616.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18615.0>,shutdown}
[views:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[149,150,151]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]}]
[views:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: [147]
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]
ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:30] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_170_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 170,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_170_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 170,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_170 - Backfill is completed with VBuckets 170,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_170 - VBucket <170> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[149,150]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]}]
[views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: [151]
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445]
Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[149]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]}]
[views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450]
Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: [150]
Replica:
[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[149]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [152] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,153,154,155,156, 157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173, 174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [149] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] 
[ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,154,155,156,157, 158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, 175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208, 209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, 226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,154,155,156,157, 158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, 175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208, 209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, 226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [153] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,156,157,158, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192, 193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, 210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226, 227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [154] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,157,158,159, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193, 194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227, 228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [156] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,158,159,160, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177, 178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194, 195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211, 212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228, 229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245, 246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [157] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,158,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 
196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,158,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178, 179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195, 196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212, 213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229, 230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246, 247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [159] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,155,160,161,162, 163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213, 214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230, 231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247, 248,249,250,251,252,253,254,255] Passive: [] Cleanup: [158] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, 
{replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,160,161,162,163, 164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180, 181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [155] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,161,162,163,164, 165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181, 182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255] Passive: [] Cleanup: " " Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,162,163,164,165, 166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182, 183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "¡" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',170}] [ns_server:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:<0.18781.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£¤¥¦§¨©", 'ns_1@10.1.2.33'}) [ns_server:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:<0.18781.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤¥¦§¨©ª") [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,163,164,165,166, 167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183, 184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136]}] [views:info] [2012-04-10 18:23:31] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,163,164,165,166, 167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183, 184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255] Passive: [] Cleanup: "¢" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136] ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18797.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"«"}, {checkpoints,[{171,0}]}, {name,"rebalance_171"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18797.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 171 [rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18797.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18783.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_171_'ns_1@10.1.2.33'">>, <<"replication_building_171_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:32] 
[ns_1@10.1.2.30:<0.18783.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18782.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.33','ns_1@10.1.2.35',171}]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18815.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', {new_child_id, "£¤¥¦§¨©ª", 'ns_1@10.1.2.33'})
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18815.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.33', 'ns_1@10.1.2.35', "£¤¥¦§¨©ª«")
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_171_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 171,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_171_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 171,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_171 - Backfill is completed with VBuckets 171,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_171 - VBucket <171> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_172_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 172,
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18818.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¬"}, {checkpoints,[{172,0}]}, {name,"rebalance_172"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18818.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 172
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18818.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18817.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_172_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18817.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18816.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',172}]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18831.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬")
[error_logger:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.18837.0>},
          {name,{new_child_id,"¬",'ns_1@10.1.2.34'}},
          {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}},
          {restart_type,permanent},
          {shutdown,60000},
          {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18837.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¬"}, {checkpoints,[{172,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18837.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 172
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18841.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"­"}, {checkpoints,[{173,0}]}, {name,"rebalance_173"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18841.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 173
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18841.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18839.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_173_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18839.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18838.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',173}]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18854.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18837.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_172 - Backfill is completed with VBuckets 172,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_172 - VBucket <172> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_173_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 173,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_173 - Backfill is completed with VBuckets 173,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_173 - VBucket <173> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
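Editor's note: the capi_set_view_manager entries above track the same moves from the view engine's side: each pass promotes freshly taken-over vbuckets into the active set and schedules the id they displaced for cleanup (Cleanup: " " and Cleanup: "¡" are the lists [160] and [161], printed as strings as noted earlier). A sketch of the classification idea as a pure function over a vbucket map, using hypothetical argument shapes rather than the real capi_set_view_manager API:

    -module(classify_sketch).
    -export([classify/2]).

    %% Map is a list of per-vbucket replica chains, e.g.
    %% [['ns_1@10.1.2.30','ns_1@10.1.2.31'], ...]; vbucket N is active on the
    %% node heading chain N and a replica on any node later in the chain.
    classify(Map, Node) ->
        Indexed = lists:zip(lists:seq(0, length(Map) - 1), Map),
        Active  = [V || {V, [Master | _]} <- Indexed, Master =:= Node],
        Replica = [V || {V, [_ | Rest]} <- Indexed, lists:member(Node, Rest)],
        {Active, Replica}.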
Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18854.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­")
[error_logger:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.18860.0>},
          {name,{new_child_id,"¬­",'ns_1@10.1.2.34'}},
          {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}},
          {restart_type,permanent},
          {shutdown,60000},
          {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18860.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¬­"}, {checkpoints,[{172,0},{173,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18860.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 173
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209]
Skipping janitor in state rebalancing: {rebalancing_state,<0.13296.0>,
    {dict,6,16,16,8,80,48,
          {[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},
          {{[['ns_1@10.1.2.30'|0.725752508361204]],
            [['ns_1@10.1.2.31'|0.9767441860465116]],
            [['ns_1@10.1.2.32'|0.9767441860465116]],
            [['ns_1@10.1.2.33'|0.9767441860465116]],
            [['ns_1@10.1.2.34'|0.04761904761904767]],
            [['ns_1@10.1.2.35'|0.0]],
            [],[],[],[],[],[],[],[],[],[]}}}}
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18872.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"®"}, {checkpoints,[{174,0}]}, {name,"rebalance_174"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18872.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 174
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18872.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18862.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_174_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18862.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18861.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',174}]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18885.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬­", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18860.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!
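Editor's note: from vbucket 172 onward the new replica lands on this node itself, so each cycle also restarts a permanent, non-takeover replication stream under the 'ns_vbm_new_sup-default' supervisor; the "Got close ack!" lines are the superseded stream shutting down cleanly. The PROGRESS REPORT fields correspond to an old-style OTP child spec of roughly this shape (reassembled from the reported fields, not copied from ns_server source):

    %% {Id, {M,F,A}, Restart, Shutdown, Type, Modules}
    ChildSpec = {{new_child_id, "¬­", 'ns_1@10.1.2.34'},
                 {ebucketmigrator_srv, start_link,
                  [{"10.1.2.34", 11209},
                   {"10.1.2.30", 11209},
                   [{username, "default"},
                    {password, []},
                    {vbuckets, "¬­"},        % vbuckets 172 and 173
                    {takeover, false},
                    {suffix, "ns_1@10.1.2.30"}]]},
                 permanent, 60000, worker, [ebucketmigrator_srv]},
    supervisor:start_child('ns_vbm_new_sup-default', ChildSpec).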
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18885.0>:cb_gen_vbm_sup:set_node_replicas:416]
start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­®")
[error_logger:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
supervisor: {local,'ns_vbm_new_sup-default'}
started: [{pid,<0.18891.0>},
          {name,{new_child_id,"¬­®",'ns_1@10.1.2.34'}},
          {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­®"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}},
          {restart_type,permanent},
          {shutdown,60000},
          {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18891.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¬­®"}, {checkpoints,[{172,0},{173,0},{174,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18891.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 174
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_174_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 174,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_174 - Backfill is completed with VBuckets 174,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_174 - VBucket <174> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_175_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 175,
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18895.0>:ebucketmigrator_srv:init:167]
Starting tap stream: [{vbuckets,"¯"}, {checkpoints,[{175,0}]}, {name,"rebalance_175"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18895.0>:ebucketmigrator_srv:process_upstream:391]
Initial stream for vbucket 175
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18895.0>:ebucketmigrator_srv:terminate:211]
Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18893.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207]
Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_175_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18893.0>:ns_replicas_builder:build_replicas_main:131]
Got exit: {'EXIT',<0.18892.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100]
Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',175}]
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18915.0>:cb_gen_vbm_sup:set_node_replicas:405]
kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬­®", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18891.0>:ebucketmigrator_srv:do_confirm_sent_messages:243]
Got close ack!
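Editor's note: the opaque {dict,6,16,16,8,80,48,...} term in the "Skipping janitor" entry above is an Erlang dict printed raw: its hash slots hold improper [Key|Value] pairs, here apparently mapping each node to its rebalance progress fraction. The same structure can be reproduced in a shell, though which slot a key lands in is an implementation detail and may differ:

    1> dict:store('ns_1@10.1.2.35', 0.0, dict:new()).
    {dict,1,16,16,8,80,48,
          {[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},
          {{[],[],[],[],[],[],
            [['ns_1@10.1.2.35'|0.0]],
            [],[],[],[],[],[],[],[],[]}}}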
[ns_server:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18915.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­®¯")
[error_logger:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.18921.0>}, {name,{new_child_id,"¬­®¯",'ns_1@10.1.2.34'}}, {mfargs,{ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­®¯"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18921.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¬­®¯"}, {checkpoints,[{172,0},{173,0},{174,0},{175,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:32] [ns_1@10.1.2.30:<0.18921.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 175
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18927.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"°"}, {checkpoints,[{176,0}]}, {name,"rebalance_176"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18927.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 176
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18927.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18923.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_176_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18923.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18922.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',176}]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_175 - Backfill is completed with VBuckets 175,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_175 - VBucket <175> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_176_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 176,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_176 - Backfill is completed with VBuckets 176,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_176 - VBucket <176> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18940.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬­®¯", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18921.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18940.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­®¯°")
[error_logger:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.18946.0>}, {name,{new_child_id,"¬­®¯°",'ns_1@10.1.2.34'}}, {mfargs,{ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­®¯°"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18946.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¬­®¯°"}, {checkpoints,[{172,0},{173,0},{174,0},{175,0},{176,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18946.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 176
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18950.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"±"}, {checkpoints,[{177,0}]}, {name,"rebalance_177"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18950.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 177
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18950.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18948.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_177_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18948.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18947.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',177}]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18963.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬­®¯°", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18946.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18963.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­®¯°±")
[error_logger:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.18969.0>}, {name,{new_child_id,"¬­®¯°±",'ns_1@10.1.2.34'}}, {mfargs,{ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­®¯°±"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18969.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¬­®¯°±"}, {checkpoints,[{172,0},{173,0},{174,0},{175,0},{176,0},{177,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18969.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 177
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_177_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 177,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_177 - Backfill is completed with VBuckets 177,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_177 - VBucket <177> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_178_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 178,
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18973.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"²"}, {checkpoints,[{178,0}]}, {name,"rebalance_178"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18973.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 178
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18973.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18971.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_178_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18971.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18970.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',178}]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18986.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬­®¯°±", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18969.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18986.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­®¯°±²")
[error_logger:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.18992.0>}, {name,{new_child_id,"¬­®¯°±²",'ns_1@10.1.2.34'}}, {mfargs,{ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­®¯°±²"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18992.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¬­®¯°±²"}, {checkpoints,[{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18992.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 178
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18996.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"³"}, {checkpoints,[{179,0}]}, {name,"rebalance_179"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18996.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 179
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18996.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_178 - Backfill is completed with VBuckets 178,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_178 - VBucket <178> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_179_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 179,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_179 - Backfill is completed with VBuckets 179,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_179 - VBucket <179> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18994.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_179_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18994.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.18993.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',179}]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19009.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬­®¯°±²", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.18992.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19009.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­®¯°±²³")
[error_logger:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.19015.0>}, {name,{new_child_id,"¬­®¯°±²³",'ns_1@10.1.2.34'}}, {mfargs,{ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­®¯°±²³"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19015.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¬­®¯°±²³"}, {checkpoints,[{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19015.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 179
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19019.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"´"}, {checkpoints,[{180,0}]}, {name,"rebalance_180"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19019.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 180
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19019.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19017.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_180_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19017.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19016.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.30',180}]
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19032.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', {new_child_id, "¬­®¯°±²³", 'ns_1@10.1.2.34'})
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19015.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19032.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.30', "¬­®¯°±²³´")
[error_logger:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.19038.0>}, {name,{new_child_id,"¬­®¯°±²³´",'ns_1@10.1.2.34'}}, {mfargs,{ebucketmigrator_srv,start_link, [{"10.1.2.34",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"¬­®¯°±²³´"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19038.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¬­®¯°±²³´"}, {checkpoints,[{172,0},{173,0},{174,0},{175,0},{176,0},{177,0},{178,0},{179,0},{180,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}]
[rebalance:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:<0.19038.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 180
[ns_server:info] [2012-04-10 18:23:33] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_180_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 180,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_180 - Backfill is completed with VBuckets 180,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_180 - VBucket <180> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_181_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 181,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_181_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 181,
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19043.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"µ"}, {checkpoints,[{181,0}]}, {name,"rebalance_181"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19043.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 181
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19043.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19040.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_181_'ns_1@10.1.2.34'">>, <<"replication_building_181_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19040.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19039.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',181}]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19057.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ")
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19064.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¶"}, {checkpoints,[{182,0}]}, {name,"rebalance_182"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19064.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 182
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19064.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19063.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_182_'ns_1@10.1.2.34'">>, <<"replication_building_182_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19063.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19062.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_181 - Backfill is completed with VBuckets 181,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_181 - VBucket <181> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_182_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 182,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_182_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 182,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_182 - Backfill is completed with VBuckets 182,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_182 - VBucket <182> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',182}]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19078.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', {new_child_id, "µ", 'ns_1@10.1.2.34'})
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19078.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ¶")
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19085.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"·"}, {checkpoints,[{183,0}]}, {name,"rebalance_183"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19085.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 183
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19085.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19084.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_183_'ns_1@10.1.2.34'">>, <<"replication_building_183_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19084.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19083.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',183}]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19099.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', {new_child_id, "µ¶", 'ns_1@10.1.2.34'})
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19099.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ¶·")
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_183_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 183,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_183_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 183,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_183 - Backfill is completed with VBuckets 183,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_183 - VBucket <183> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19106.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¸"}, {checkpoints,[{184,0}]}, {name,"rebalance_184"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19106.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 184
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19106.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19105.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_184_'ns_1@10.1.2.34'">>, <<"replication_building_184_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19105.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19104.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',184}]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19128.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', {new_child_id, "µ¶·", 'ns_1@10.1.2.34'})
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19128.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ¶·¸")
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_184_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 184,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_184_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 184,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_184 - Backfill is completed with VBuckets 184,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_184 - VBucket <184> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19135.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¹"}, {checkpoints,[{185,0}]}, {name,"rebalance_185"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19135.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 185
[rebalance:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19135.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19134.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_185_'ns_1@10.1.2.34'">>, <<"replication_building_185_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19134.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19133.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',185}]
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19149.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', {new_child_id, "µ¶·¸", 'ns_1@10.1.2.34'})
[ns_server:info] [2012-04-10 18:23:34] [ns_1@10.1.2.30:<0.19149.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ¶·¸¹")
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_185_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 185,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_185_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 185,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_185 - Backfill is completed with VBuckets 185,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_185 - VBucket <185> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19156.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"º"}, {checkpoints,[{186,0}]}, {name,"rebalance_186"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19156.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 186
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19156.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19155.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_186_'ns_1@10.1.2.34'">>, <<"replication_building_186_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19155.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19154.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',186}]
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19170.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', {new_child_id, "µ¶·¸¹", 'ns_1@10.1.2.34'})
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19170.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ¶·¸¹º")
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_186_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 186,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_186_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 186,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_186 - Backfill is completed with VBuckets 186,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_186 - VBucket <186> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_187_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 187,
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19177.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"»"}, {checkpoints,[{187,0}]}, {name,"rebalance_187"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19177.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 187
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19177.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19176.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_187_'ns_1@10.1.2.34'">>, <<"replication_building_187_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19176.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19175.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',187}]
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19191.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', {new_child_id, "µ¶·¸¹º", 'ns_1@10.1.2.34'})
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19191.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ¶·¸¹º»")
[views:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬­®"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136]}]
[views:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬­®" Cleanup: [] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136] ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_187_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 187,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_187 - Backfill is completed with VBuckets 187,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_187 - VBucket <187> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_188_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 188,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_188_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 188,
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19198.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¼"}, {checkpoints,[{188,0}]}, {name,"rebalance_188"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19198.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 188
[rebalance:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19198.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19197.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_188_'ns_1@10.1.2.34'">>, <<"replication_building_188_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19197.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19196.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.31',188}]
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19212.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', {new_child_id, "µ¶·¸¹º»", 'ns_1@10.1.2.34'})
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:<0.19212.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.31', "µ¶·¸¹º»¼")
[views:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬®"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173]}]
[views:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬®" Cleanup: "­" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173] ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:35] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_188 - Backfill is completed with VBuckets 188,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_188 - VBucket <188> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_189_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 189,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_189_'ns_1@10.1.2.34' - Backfill is completed with VBuckets 189,
[rebalance:info] [2012-04-10 18:23:39] [ns_1@10.1.2.30:<0.19220.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"½"}, {checkpoints,[{189,0}]}, {name,"rebalance_189"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:39] [ns_1@10.1.2.30:<0.19220.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 189
[rebalance:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:<0.19220.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:<0.19218.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_189_'ns_1@10.1.2.34'">>, <<"replication_building_189_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:<0.19218.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19217.0>,shutdown}
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,176,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬®"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,176,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬®" Cleanup: "±" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬®"}, {ignore,"£¤¥¦§¨©ª«"},
{replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,175,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬®" Cleanup: "°" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬®"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬®" Cleanup: "¯" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173] ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_189 - Backfill is completed with VBuckets 189,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_189 - VBucket <189> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬" Cleanup: "®" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬" Cleanup: "³" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,178,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬" Cleanup: "´" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,"¬"}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: "¬" Cleanup: "²" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"£¤¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "¬" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"£¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "¤" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"¥¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "£" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"¦§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "¥" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"§¨©ª«"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:40] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "¦" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"§¨©ª"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "«" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"§¨ª"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "©" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"§¨"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174]}]
[views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "ª"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255]}, {passive,[]}, {ignore,"§"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174]}] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] Passive: [] Cleanup: "¨" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174]}] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,181,182,183,184, 185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] Passive: [] Cleanup: "§" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',189}] [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19432.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½") [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] 
Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,182,183,184,185, 186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202, 203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219, 220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174]}] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,182,183,184,185, 186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202, 203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219, 220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255] Passive: [] Cleanup: "µ" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,183,184,185,186, 187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174]}] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,183,184,185,186, 187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255] Passive: [] Cleanup: "¶" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,184,185,186,187, 188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204, 205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174]}] [views:info] [2012-04-10 18:23:41] 
[ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,184,185,186,187, 188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204, 205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "·" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174]}] [views:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,185,186,187,188, 189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "¸" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174] ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19465.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¾"}, {checkpoints,[{190,0}]}, {name,"rebalance_190"}, {takeover,true}] [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_190_'ns_1@10.1.2.34' - 190, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_190_'ns_1@10.1.2.32' - 190, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19465.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 190 [rebalance:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19465.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19437.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_190_'ns_1@10.1.2.34'">>, <<"replication_building_190_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19437.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19436.0>,shutdown} [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',190}] 
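
Note: the one-character "strings" scattered through these entries are not mojibake. ns_server passes vbucket ids around as ordinary Erlang integer lists, and the logger renders any list made up entirely of printable Latin-1 codes as a string. So {vbuckets,"¾"} above is the list [190] (agreeing with {checkpoints,[{190,0}]} in the same entry), and the start_child argument that grows from "½" to "½¾" to "½¾¿" below is the replica set [189], [189,190], [189,190,191] being extended one takeover at a time. A quick illustrative check in an Erlang shell:

    1> "¾" =:= [190].
    true
    2> io:format("~w~n", ["½¾¿ÀÁ"]).   %% ~w bypasses the list-as-string heuristic
    [189,190,191,192,193]
    ok
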
[ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19491.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', {new_child_id, "½", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19491.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½¾") [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_190 - 190, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_190 - VBucket <190> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_191_'ns_1@10.1.2.34' - 191, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_191_'ns_1@10.1.2.32' - 191, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19497.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"¿"}, {checkpoints,[{191,0}]}, {name,"rebalance_191"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19497.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 191 [rebalance:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19497.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19496.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_191_'ns_1@10.1.2.34'">>, <<"replication_building_191_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19496.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19495.0>,shutdown} [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',191}] [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19512.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', {new_child_id, "½¾", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:41] [ns_1@10.1.2.30:<0.19512.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½¾¿") [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_191 - 191, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_191 - VBucket <191> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_192_'ns_1@10.1.2.34' - 192, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_192_'ns_1@10.1.2.32' - 192, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19518.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"À"}, {checkpoints,[{192,0}]}, {name,"rebalance_192"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19518.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 192 [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19518.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19517.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_192_'ns_1@10.1.2.34'">>, <<"replication_building_192_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19517.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19516.0>,shutdown} [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',192}] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19533.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', {new_child_id, "½¾¿", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19533.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½¾¿À") [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_192 - 192, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_192 - VBucket <192> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_193_'ns_1@10.1.2.34' - 193, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_193_'ns_1@10.1.2.32' - 193, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19539.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Á"}, {checkpoints,[{193,0}]}, {name,"rebalance_193"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19539.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 193 [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19539.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19538.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_193_'ns_1@10.1.2.34'">>, <<"replication_building_193_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19538.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19537.0>,shutdown} [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',193}] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19554.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', {new_child_id, "½¾¿À", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19554.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½¾¿ÀÁ") [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19560.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Â"}, {checkpoints,[{194,0}]}, {name,"rebalance_194"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19560.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 194 [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19560.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_193 - 193, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_193 - VBucket <193> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_194_'ns_1@10.1.2.32' - 194, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_194_'ns_1@10.1.2.34' - 194, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_194 - 194, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_194 - VBucket <194> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19559.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_194_'ns_1@10.1.2.34'">>, <<"replication_building_194_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19559.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19558.0>,shutdown} [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',194}] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19575.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', {new_child_id, "½¾¿ÀÁ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19575.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½¾¿ÀÁÂ") [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19581.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ã"}, {checkpoints,[{195,0}]}, {name,"rebalance_195"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19581.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 195 [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19581.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state rebalancing: {rebalancing_state,<0.13296.0>, {dict,6,16,16,8,80,48, {[],[],[],[],[],[],[],[],[],[],[],[], [],[],[],[]}, {{[['ns_1@10.1.2.30'| 0.7959866220735786]], [['ns_1@10.1.2.31'| 0.9767441860465116]], [['ns_1@10.1.2.32'| 0.9767441860465116]], [['ns_1@10.1.2.33'| 0.9767441860465116]], [['ns_1@10.1.2.34'| 0.5476190476190477]], [['ns_1@10.1.2.35'|0.0]], [],[],[],[],[],[],[],[],[],[]}}}} [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19580.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_195_'ns_1@10.1.2.34'">>, <<"replication_building_195_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19580.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19579.0>,shutdown} [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',195}] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19603.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', {new_child_id, "½¾¿ÀÁÂ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19603.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½¾¿ÀÁÂÃ") [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_195_'ns_1@10.1.2.34' - 195, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_195_'ns_1@10.1.2.32' - 195, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_195 - 195, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_195 - VBucket <195> is 
going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_196_'ns_1@10.1.2.34' - 196, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_196_'ns_1@10.1.2.32' - 196, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19610.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ä"}, {checkpoints,[{196,0}]}, {name,"rebalance_196"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19610.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 196 [rebalance:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19610.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19609.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_196_'ns_1@10.1.2.34'">>, <<"replication_building_196_'ns_1@10.1.2.32'">>] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19609.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19608.0>,shutdown} [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.32',196}] [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19625.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', {new_child_id, "½¾¿ÀÁÂÃ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:42] [ns_1@10.1.2.30:<0.19625.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.32', "½¾¿ÀÁÂÃÄ") [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19642.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Å"}, {checkpoints,[{197,0}]}, {name,"rebalance_197"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19642.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 197 [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19642.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_196 - 196, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_196 - VBucket <196> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_197_'ns_1@10.1.2.34' - 197, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_197_'ns_1@10.1.2.33' - 197, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_197 - 197, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_197 - VBucket <197> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19630.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_197_'ns_1@10.1.2.34'">>, <<"replication_building_197_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19630.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19629.0>,shutdown} [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',197}] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19658.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "Å") [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19663.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Æ"}, {checkpoints,[{198,0}]}, {name,"rebalance_198"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19663.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 198 [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19663.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19662.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_198_'ns_1@10.1.2.34'">>, <<"replication_building_198_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19662.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19661.0>,shutdown} [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',198}] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19679.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', {new_child_id, "Å", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19679.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "ÅÆ") [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_198_'ns_1@10.1.2.34' - 198, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_198_'ns_1@10.1.2.33' - 198, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_198 - 198, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_198 - VBucket <198> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_199_'ns_1@10.1.2.34' - 199, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19684.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ç"}, {checkpoints,[{199,0}]}, {name,"rebalance_199"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19684.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 199 [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19684.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19683.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_199_'ns_1@10.1.2.34'">>, <<"replication_building_199_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19683.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19682.0>,shutdown} [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',199}] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19700.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', {new_child_id, "ÅÆ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19700.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "ÅÆÇ") [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_199_'ns_1@10.1.2.33' - 199, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_199 - 199, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_199 - VBucket <199> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_200_'ns_1@10.1.2.34' - 200, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_200_'ns_1@10.1.2.33' - 200, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19705.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"È"}, {checkpoints,[{200,0}]}, {name,"rebalance_200"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19705.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 200 [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19705.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19704.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_200_'ns_1@10.1.2.34'">>, <<"replication_building_200_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19704.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19703.0>,shutdown} [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',200}] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19721.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', {new_child_id, "ÅÆÇ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19721.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "ÅÆÇÈ") [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19726.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"É"}, {checkpoints,[{201,0}]}, {name,"rebalance_201"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19726.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 201 [rebalance:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19726.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_200 - 200, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_200 - VBucket <200> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_201_'ns_1@10.1.2.33' - 201, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_201_'ns_1@10.1.2.34' - 201, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_201 - 201, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_201 - VBucket <201> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19725.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_201_'ns_1@10.1.2.34'">>, <<"replication_building_201_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19725.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19724.0>,shutdown} [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',201}] [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19742.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', {new_child_id, "ÅÆÇÈ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:43] [ns_1@10.1.2.30:<0.19742.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "ÅÆÇÈÉ") [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19747.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ê"}, {checkpoints,[{202,0}]}, {name,"rebalance_202"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19747.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 202 [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19747.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19746.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_202_'ns_1@10.1.2.34'">>, <<"replication_building_202_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19746.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19745.0>,shutdown} [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',202}] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19763.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', {new_child_id, "ÅÆÇÈÉ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19763.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "ÅÆÇÈÉÊ") [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_202_'ns_1@10.1.2.34' - 202, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_202_'ns_1@10.1.2.33' - 202, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_202 - 202, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_202 - VBucket <202> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_203_'ns_1@10.1.2.34' - 203, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19768.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ë"}, {checkpoints,[{203,0}]}, {name,"rebalance_203"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19768.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 203 [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19768.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19767.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_203_'ns_1@10.1.2.34'">>, <<"replication_building_203_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19767.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19766.0>,shutdown} [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',203}] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19784.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', {new_child_id, "ÅÆÇÈÉÊ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19784.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "ÅÆÇÈÉÊË") [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_203_'ns_1@10.1.2.33' - 203, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_203 - 203, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_203 - VBucket <203> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_204_'ns_1@10.1.2.33' - 204, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_204_'ns_1@10.1.2.34' - 204, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19789.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ì"}, {checkpoints,[{204,0}]}, {name,"rebalance_204"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19789.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 204 [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19789.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19788.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_204_'ns_1@10.1.2.34'">>, <<"replication_building_204_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19788.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19787.0>,shutdown} [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.33',204}] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19805.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', {new_child_id, "ÅÆÇÈÉÊË", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19805.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.33', "ÅÆÇÈÉÊËÌ") [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_204 - 204, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_204 - VBucket <204> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_205_'ns_1@10.1.2.35' - 205, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_205_'ns_1@10.1.2.34' - 205, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19810.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Í"}, {checkpoints,[{205,0}]}, {name,"rebalance_205"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19810.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 205 [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19810.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19809.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_205_'ns_1@10.1.2.34'">>, <<"replication_building_205_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19809.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19808.0>,shutdown} [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',205}] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19835.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "Í") [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19839.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Î"}, {checkpoints,[{206,0}]}, {name,"rebalance_206"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19839.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 206 [rebalance:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19839.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19837.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_206_'ns_1@10.1.2.34'">>, <<"replication_building_206_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19837.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19836.0>,shutdown} [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',206}] [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_205 - 205, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_205 - VBucket <205> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_206_'ns_1@10.1.2.34' - 206, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_206_'ns_1@10.1.2.35' - 206, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_206 - 206, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_206 - VBucket <206> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19857.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "Í", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:44] [ns_1@10.1.2.30:<0.19857.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎ") [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19860.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ï"}, {checkpoints,[{207,0}]}, {name,"rebalance_207"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19860.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 207 [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19860.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19859.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_207_'ns_1@10.1.2.34'">>, <<"replication_building_207_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19859.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19858.0>,shutdown} [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',207}] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19878.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "ÍÎ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19878.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎÏ") [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_207_'ns_1@10.1.2.35' - 207, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_207_'ns_1@10.1.2.34' - 207, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_207 - 207, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_207 - VBucket <207> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_208_'ns_1@10.1.2.34' - 208, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_208_'ns_1@10.1.2.35' - 208, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19881.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ð"}, {checkpoints,[{208,0}]}, {name,"rebalance_208"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19881.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 208 [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19881.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19880.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_208_'ns_1@10.1.2.34'">>, <<"replication_building_208_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19880.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19879.0>,shutdown} [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',208}] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19899.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "ÍÎÏ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19899.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎÏÐ") [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"¹º»¼½¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_208 - 208, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_208 - VBucket <208> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_209_'ns_1@10.1.2.34' - 209, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_209_'ns_1@10.1.2.35' - 209, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19902.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ñ"}, {checkpoints,[{209,0}]}, {name,"rebalance_209"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19902.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 209 [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19902.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19901.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_209_'ns_1@10.1.2.34'">>, <<"replication_building_209_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19901.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19900.0>,shutdown} [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',209}] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19920.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "ÍÎÏÐ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19920.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎÏÐÑ") [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19923.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ò"}, {checkpoints,[{210,0}]}, {name,"rebalance_210"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19923.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 210 [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19923.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19922.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_210_'ns_1@10.1.2.34'">>, <<"replication_building_210_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_209 - 209, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_209 - VBucket <209> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_210_'ns_1@10.1.2.34' - 210, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_210_'ns_1@10.1.2.35' - 210, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_210 - 210, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_210 - VBucket <210> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19922.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19921.0>,shutdown} [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',210}] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19941.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "ÍÎÏÐÑ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19941.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎÏÐÑÒ") [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"¹º»¼¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: "½" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"º»¼¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: "¹" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 
132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"»¼¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: "º" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19963.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ó"}, {checkpoints,[{211,0}]}, {name,"rebalance_211"}, {takeover,true}] [ns_server:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_211_'ns_1@10.1.2.35' - 211, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_211_'ns_1@10.1.2.34' - 211, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19963.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 211 [rebalance:info] [2012-04-10 18:23:45] [ns_1@10.1.2.30:<0.19963.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"¼¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 
249,250,251,252,253,254,255] Passive: [] Cleanup: "»" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:<0.19943.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_211_'ns_1@10.1.2.34'">>, <<"replication_building_211_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:<0.19943.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.19942.0>,shutdown} [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214, 215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: "¼" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_211 - 211, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_211 - VBucket <211> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,"¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,197,198, 199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255] Passive: [] Cleanup: "Ä" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,197,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255]}, {passive,[]}, {ignore,"¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,197,199, 200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216, 217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "Æ" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,197,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255]}, {passive,[]}, {ignore,"¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 
18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,197,200, 201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217, 218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255] Passive: [] Cleanup: "Ç" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255]}, {passive,[]}, {ignore,"¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,195,200,201, 202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218, 219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] Passive: [] Cleanup: "Å" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,200,201,202, 203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219, 220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255]}, {passive,[]}, {ignore,"¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,194,200,201,202, 203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219, 220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255] Passive: [] Cleanup: "Ã" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] 
[ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,"¾¿ÀÁ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255] Passive: [] Cleanup: "Â" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,"¾¿À"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255] Passive: [] Cleanup: "Á" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,"¾¿"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: 
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255] Passive: [] Cleanup: "À" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,"¿"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,200,201,202,203, 204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255] Passive: [] Cleanup: "¾" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,201,202,203,204, 205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,"¿"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,201,202,203,204, 205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "È" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,201,202,203,204, 
205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180]}] [views:info] [2012-04-10 18:23:46] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,201,202,203,204, 205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221, 222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "¿" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',211}] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20124.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "ÍÎÏÐÑÒ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20124.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎÏÐÑÒÓ") [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20127.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ô"}, {checkpoints,[{212,0}]}, {name,"rebalance_212"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20127.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 212 [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20127.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20126.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_212_'ns_1@10.1.2.34'">>, <<"replication_building_212_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20126.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20125.0>,shutdown} [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',212}] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20145.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "ÍÎÏÐÑÒÓ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20145.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎÏÐÑÒÓÔ") [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_212_'ns_1@10.1.2.34' - 212, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_212_'ns_1@10.1.2.35' - 212, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_212 - 212, ill is 
completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_212 - VBucket <212> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_213_'ns_1@10.1.2.34' - 213, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_213_'ns_1@10.1.2.35' - 213, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20148.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Õ"}, {checkpoints,[{213,0}]}, {name,"rebalance_213"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20148.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 213 [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20148.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20147.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_213_'ns_1@10.1.2.34'">>, <<"replication_building_213_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20147.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20146.0>,shutdown} [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.34','ns_1@10.1.2.35',213}] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20166.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', {new_child_id, "ÍÎÏÐÑÒÓÔ", 'ns_1@10.1.2.34'}) [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20166.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.34', 'ns_1@10.1.2.35', "ÍÎÏÐÑÒÓÔÕ") [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20169.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö"}, {checkpoints,[{214,0}]}, {name,"rebalance_214"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20169.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 214 [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20169.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20168.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_214_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20168.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20167.0>,shutdown} [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',214}] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20182.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö") [error_logger:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20188.0>}, {name,{new_child_id,"Ö",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, 
[{username,"default"}, {password,[]}, {vbuckets,"Ö"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_213 - 213, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_213 - VBucket <213> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_214_'ns_1@10.1.2.35' - 214, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_214 - 214, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_214 - VBucket <214> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20188.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö"}, {checkpoints,[{214,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20188.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 214 [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20192.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"×"}, {checkpoints,[{215,0}]}, {name,"rebalance_215"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20192.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 215 [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20192.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20190.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_215_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20190.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20189.0>,shutdown} [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',215}] [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20205.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', {new_child_id, "Ö", 'ns_1@10.1.2.35'}) [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20188.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20205.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö×") [error_logger:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20211.0>}, {name,{new_child_id,"Ö×",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"Ö×"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20211.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö×"}, {checkpoints,[{214,0},{215,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20211.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 215 [ns_server:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_215_'ns_1@10.1.2.35' - 215, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_215 - 215, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_215 - VBucket <215> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_216_'ns_1@10.1.2.35' - 216, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20222.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ø"}, {checkpoints,[{216,0}]}, {name,"rebalance_216"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20222.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 216 [rebalance:info] [2012-04-10 18:23:47] [ns_1@10.1.2.30:<0.20222.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20213.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_216_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20213.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20212.0>,shutdown} [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',216}] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20237.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', {new_child_id, "Ö×", 'ns_1@10.1.2.35'}) [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20211.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20237.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö×Ø") [error_logger:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20243.0>}, {name,{new_child_id,"Ö×Ø",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"Ö×Ø"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20243.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö×Ø"}, {checkpoints,[{214,0},{215,0},{216,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20243.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 216 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20247.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ù"}, {checkpoints,[{217,0}]}, {name,"rebalance_217"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20247.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 217 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20247.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20245.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_217_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20245.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20244.0>,shutdown} [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',217}] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20260.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', {new_child_id, "Ö×Ø", 'ns_1@10.1.2.35'}) [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20243.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20260.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö×ØÙ") [error_logger:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20266.0>}, {name,{new_child_id,"Ö×ØÙ",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"Ö×ØÙ"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_216 - 216, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_216 - VBucket <216> is going dead to complete vbucket takeover. 
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_217_'ns_1@10.1.2.35' - 217, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_217 - 217, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_217 - VBucket <217> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20266.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö×ØÙ"}, {checkpoints,[{214,0},{215,0},{216,0},{217,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20266.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 217 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20270.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ú"}, {checkpoints,[{218,0}]}, {name,"rebalance_218"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20270.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 218 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20270.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20268.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_218_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20268.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20267.0>,shutdown} [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',218}] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20283.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', {new_child_id, "Ö×ØÙ", 'ns_1@10.1.2.35'}) [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20266.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20283.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö×ØÙÚ") [error_logger:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20289.0>}, {name,{new_child_id,"Ö×ØÙÚ",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"Ö×ØÙÚ"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20289.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö×ØÙÚ"}, {checkpoints,[{214,0},{215,0},{216,0},{217,0},{218,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20289.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 218 [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_218_'ns_1@10.1.2.35' - 218, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_218 - 218, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_218 - VBucket <218> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_219_'ns_1@10.1.2.35' - 219, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20293.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Û"}, {checkpoints,[{219,0}]}, {name,"rebalance_219"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20293.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 219 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20293.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20291.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_219_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20291.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20290.0>,shutdown} [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',219}] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20306.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', {new_child_id, "Ö×ØÙÚ", 'ns_1@10.1.2.35'}) [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20289.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20306.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö×ØÙÚÛ") [error_logger:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20312.0>}, {name,{new_child_id,"Ö×ØÙÚÛ",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"Ö×ØÙÚÛ"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20312.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö×ØÙÚÛ"}, {checkpoints,[{214,0},{215,0},{216,0},{217,0},{218,0},{219,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20312.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 219 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20323.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ü"}, {checkpoints,[{220,0}]}, {name,"rebalance_220"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20323.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 220 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20323.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20314.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_220_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20314.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20313.0>,shutdown} [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',220}] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20337.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', {new_child_id, "Ö×ØÙÚÛ", 'ns_1@10.1.2.35'}) [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20312.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20337.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö×ØÙÚÛÜ") [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_219 - 219, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_219 - VBucket <219> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_220_'ns_1@10.1.2.35' - 220, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_220 - 220, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_220 - VBucket <220> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [error_logger:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20343.0>}, {name,{new_child_id,"Ö×ØÙÚÛÜ",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"Ö×ØÙÚÛÜ"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20343.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö×ØÙÚÛÜ"}, {checkpoints,[{214,0},{215,0},{216,0},{217,0},{218,0},{219,0},{220,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20343.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 220 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20347.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ý"}, {checkpoints,[{221,0}]}, {name,"rebalance_221"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20347.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 221 [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20347.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20345.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_221_'ns_1@10.1.2.35'">>] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20345.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20344.0>,shutdown} [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.30',221}] [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20360.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', {new_child_id, "Ö×ØÙÚÛÜ", 'ns_1@10.1.2.35'}) [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20343.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack! 
[ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20360.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.30', "Ö×ØÙÚÛÜÝ") [error_logger:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,'ns_vbm_new_sup-default'} started: [{pid,<0.20366.0>}, {name,{new_child_id,"Ö×ØÙÚÛÜÝ",'ns_1@10.1.2.35'}}, {mfargs, {ebucketmigrator_srv,start_link, [{"10.1.2.35",11209}, {"10.1.2.30",11209}, [{username,"default"}, {password,[]}, {vbuckets,"Ö×ØÙÚÛÜÝ"}, {takeover,false}, {suffix,"ns_1@10.1.2.30"}]]}}, {restart_type,permanent}, {shutdown,60000}, {child_type,worker}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20366.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Ö×ØÙÚÛÜÝ"}, {checkpoints,[{214,0}, {215,0}, {216,0}, {217,0}, {218,0}, {219,0}, {220,0}, {221,0}]}, {name,"replication_ns_1@10.1.2.30"}, {takeover,false}] [rebalance:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:<0.20366.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 221 [ns_server:info] [2012-04-10 18:23:48] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_221_'ns_1@10.1.2.35' - 221, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_221 - 221, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_221 - VBucket <221> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_222_'ns_1@10.1.2.31' - 222, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_222_'ns_1@10.1.2.35' - 222, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20371.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"Þ"}, {checkpoints,[{222,0}]}, {name,"rebalance_222"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20371.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 222 [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20371.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20368.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_222_'ns_1@10.1.2.35'">>, <<"replication_building_222_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20368.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20367.0>,shutdown} [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',222}] [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20385.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þ") [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20392.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ß"}, {checkpoints,[{223,0}]}, {name,"rebalance_223"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20392.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 223 [rebalance:info] 
[2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20392.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20391.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_223_'ns_1@10.1.2.35'">>, <<"replication_building_223_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20391.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20390.0>,shutdown} [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',223}] [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_222 - 222, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_222 - VBucket <222> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_223_'ns_1@10.1.2.31' - 223, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_223_'ns_1@10.1.2.35' - 223, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_223 - 223, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_223 - VBucket <223> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20406.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þ", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20406.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þß") [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20413.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"à"}, {checkpoints,[{224,0}]}, {name,"rebalance_224"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20413.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 224 [rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20413.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20412.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_224_'ns_1@10.1.2.35'">>, <<"replication_building_224_'ns_1@10.1.2.31'">>] [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20412.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20411.0>,shutdown} [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',224}] [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20427.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þß", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20427.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þßà") [ns_server:info] [2012-04-10 
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_224_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 224,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_224_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 224,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_224 - Backfill is completed with VBuckets 224,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_224 - VBucket <224> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20434.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"á"}, {checkpoints,[{225,0}]}, {name,"rebalance_225"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20434.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 225
[rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20434.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20433.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_225_'ns_1@10.1.2.35'">>, <<"replication_building_225_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20433.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20432.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',225}]
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20448.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þßà", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20448.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þßàá")
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_225_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 225,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_225_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 225,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_225 - Backfill is completed with VBuckets 225,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_225 - VBucket <225> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20455.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"â"}, {checkpoints,[{226,0}]}, {name,"rebalance_226"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20455.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 226
[rebalance:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20455.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20454.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_226_'ns_1@10.1.2.35'">>, <<"replication_building_226_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20454.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20453.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',226}]
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20469.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þßàá", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:49] [ns_1@10.1.2.30:<0.20469.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þßàáâ")
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_226_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 226,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_226_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 226,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_226 - Backfill is completed with VBuckets 226,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_226 - VBucket <226> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20476.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ã"}, {checkpoints,[{227,0}]}, {name,"rebalance_227"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20476.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 227
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20476.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20475.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_227_'ns_1@10.1.2.35'">>, <<"replication_building_227_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20475.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20474.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',227}]
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20490.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þßàáâ", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20490.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þßàáâã")
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_227_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 227,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_227_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 227,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_227 - Backfill is completed with VBuckets 227,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_227 - VBucket <227> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20497.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ä"}, {checkpoints,[{228,0}]}, {name,"rebalance_228"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20497.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 228
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20497.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20496.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_228_'ns_1@10.1.2.35'">>, <<"replication_building_228_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20496.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20495.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',228}]
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20511.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þßàáâã", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20511.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þßàáâãä")
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÉÊËÌÍÎÏÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: []
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_228_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 228,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_228_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 228,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_228 - Backfill is completed with VBuckets 228,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_228 - VBucket <228> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20518.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"å"}, {checkpoints,[{229,0}]}, {name,"rebalance_229"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20518.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 229
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20518.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20517.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_229_'ns_1@10.1.2.35'">>, <<"replication_building_229_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20517.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20516.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',229}]
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20532.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þßàáâãä", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20532.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þßàáâãäå")
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_229_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 229,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_229_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 229,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_229 - Backfill is completed with VBuckets 229,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_229 - VBucket <229> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÊËÌÍÎÏÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "É"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÊÌÍÎÏÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ë"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20548.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"æ"}, {checkpoints,[{230,0}]}, {name,"rebalance_230"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20548.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 230
[rebalance:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20548.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÌÍÎÏÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ê"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20538.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_230_'ns_1@10.1.2.35'">>, <<"replication_building_230_'ns_1@10.1.2.31'">>]
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:<0.20538.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20537.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_230_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 230,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_230_'ns_1@10.1.2.31' - Backfill is completed with VBuckets 230,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_230 - Backfill is completed with VBuckets 230,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_230 - VBucket <230> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÍÎÏÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ì"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÎÏÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:50] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Í"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÏÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Î"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÐÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ï"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ð"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,212,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Õ"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÑÒÓ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ô"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ÑÒ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ó"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"Ñ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ò"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ñ"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ö"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,215,216,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,215,216,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ù"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:ns_config_rep:ns_config_rep:do_pull:258] Pulling config from: 'ns_1@10.1.2.31'
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,215,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,215,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ø"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "×"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ú"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:51] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Û"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ü"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Ý"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.31',230}]
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20757.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', {new_child_id, "Þßàáâãäå", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20757.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.31', "Þßàáâãäåæ")
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "Þ"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "ß"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "à"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "á"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[rebalance:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20786.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ç"}, {checkpoints,[{231,0}]}, {name,"rebalance_231"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20786.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 231
[rebalance:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20786.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_231_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 231,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_231_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 231,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_231 - Backfill is completed with VBuckets 231,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_231 - VBucket <231> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20763.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_231_'ns_1@10.1.2.35'">>, <<"replication_building_231_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20763.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20762.0>,shutdown}
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "â"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]}]
[views:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default:
Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]
Passive: []
Cleanup: "ã"
Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180]
ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',231}]
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20846.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "ç")
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state rebalancing: {rebalancing_state,<0.13296.0>,{dict,6,16,16,8,80,48,{[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]},{{[['ns_1@10.1.2.30'|0.919732441471572]],[['ns_1@10.1.2.31'|0.9767441860465116]],[['ns_1@10.1.2.32'|0.9767441860465116]],[['ns_1@10.1.2.33'|0.9767441860465116]],[['ns_1@10.1.2.34'|0.9761904761904762]],[['ns_1@10.1.2.35'|0.4285714285714286]],[],[],[],[],[],[],[],[],[],[]}}}}
[rebalance:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20853.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"è"}, {checkpoints,[{232,0}]}, {name,"rebalance_232"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20853.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 232
[rebalance:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20853.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20851.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_232_'ns_1@10.1.2.35'">>, <<"replication_building_232_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20851.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20850.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',232}]
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20868.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', {new_child_id, "ç", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:<0.20868.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "çè")
[ns_server:info] [2012-04-10 18:23:52] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_232_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 232,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_232_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 232,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_232 - Backfill is completed with VBuckets 232,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_232 - VBucket <232> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_233_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 233,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_233_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 233,
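A note on the ns_orchestrator entry above ("Skipping janitor in state rebalancing"): the rebalancing_state term carries one float per node, which reads like that node's fraction of completed vbucket moves (note 'ns_1@10.1.2.35', the node with the most moves still pending, at about 0.43). Assuming overall rebalance progress is simply the mean of these per-node fractions (our rough reading of the term, not a formula this log confirms), the rebalance would be roughly 88% done:

    %% In an Erlang shell (a sketch under the averaging assumption above):
    1> Fractions = [0.919732441471572, 0.9767441860465116, 0.9767441860465116,
                    0.9767441860465116, 0.9761904761904762, 0.4285714285714286].
    2> lists:sum(Fractions) / length(Fractions).
    %% => approximately 0.8758, i.e. ~88%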
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20883.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"é"}, {checkpoints,[{233,0}]}, {name,"rebalance_233"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20883.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 233
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20883.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20873.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_233_'ns_1@10.1.2.35'">>, <<"replication_building_233_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20873.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20872.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',233}]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20898.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', {new_child_id, "çè", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20898.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "çèé")
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20904.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ê"}, {checkpoints,[{234,0}]}, {name,"rebalance_234"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20904.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 234
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20904.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20903.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_234_'ns_1@10.1.2.35'">>, <<"replication_building_234_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20903.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20902.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',234}]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_233 - Backfill is completed with VBuckets 233,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_233 - VBucket <233> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_234_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 234,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_234_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 234,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_234 - Backfill is completed with VBuckets 234,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_234 - VBucket <234> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20919.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', {new_child_id, "çèé", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20919.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "çèéê")
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20925.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ë"}, {checkpoints,[{235,0}]}, {name,"rebalance_235"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20925.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 235
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20925.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20924.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_235_'ns_1@10.1.2.35'">>, <<"replication_building_235_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20924.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20923.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',235}]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20940.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', {new_child_id, "çèéê", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20940.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "çèéêë")
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_235_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 235,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_235_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 235,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_235 - Backfill is completed with VBuckets 235,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_235 - VBucket <235> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_236_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 236,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_236_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 236,
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20946.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ì"}, {checkpoints,[{236,0}]}, {name,"rebalance_236"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20946.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 236
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20946.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20945.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_236_'ns_1@10.1.2.35'">>, <<"replication_building_236_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20945.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20944.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',236}]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20961.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', {new_child_id, "çèéêë", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20961.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "çèéêëì")
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20967.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"í"}, {checkpoints,[{237,0}]}, {name,"rebalance_237"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20967.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 237
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20967.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20966.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_237_'ns_1@10.1.2.35'">>, <<"replication_building_237_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20966.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20965.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',237}]
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20982.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', {new_child_id, "çèéêëì", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20982.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "çèéêëìí")
[ns_server:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_236 - Backfill is completed with VBuckets 236,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_236 - VBucket <236> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_237_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 237,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_237_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 237,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_237 - Backfill is completed with VBuckets 237,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_237 - VBucket <237> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20988.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"î"}, {checkpoints,[{238,0}]}, {name,"rebalance_238"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20988.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 238
[rebalance:info] [2012-04-10 18:23:53] [ns_1@10.1.2.30:<0.20988.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.20987.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_238_'ns_1@10.1.2.35'">>, <<"replication_building_238_'ns_1@10.1.2.32'">>]
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.20987.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.20986.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.32',238}]
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21003.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', {new_child_id, "çèéêëìí", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21003.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.32', "çèéêëìíî")
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_238_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 238,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_238_'ns_1@10.1.2.32' - Backfill is completed with VBuckets 238,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_238 - Backfill is completed with VBuckets 238,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_238 - VBucket <238> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_239_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 239,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_239_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 239,
[rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21009.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ï"}, {checkpoints,[{239,0}]}, {name,"rebalance_239"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21009.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 239
[rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21009.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21008.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_239_'ns_1@10.1.2.35'">>, <<"replication_building_239_'ns_1@10.1.2.33'">>]
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21008.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21007.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',239}]
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21025.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ï")
[rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21030.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ð"}, {checkpoints,[{240,0}]}, {name,"rebalance_240"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21030.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 240
[rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21030.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successful takeover
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21029.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_240_'ns_1@10.1.2.35'">>, <<"replication_building_240_'ns_1@10.1.2.33'">>]
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21029.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21028.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',240}]
[ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_239 - Backfill is completed with VBuckets 239,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_239 - VBucket <239> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_240_'ns_1@10.1.2.35' - Backfill is completed with VBuckets 240,
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_240_'ns_1@10.1.2.33' - Backfill is completed with VBuckets 240,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_240 - Backfill is completed with VBuckets 240,
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_240 - VBucket <240> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed.
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21046.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', {new_child_id, "ï", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21046.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ïð") [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21051.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ñ"}, {checkpoints,[{241,0}]}, {name,"rebalance_241"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21051.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 241 [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21051.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21050.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_241_'ns_1@10.1.2.35'">>, <<"replication_building_241_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21050.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21049.0>,shutdown} [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',241}] [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21067.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', {new_child_id, "ïð", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21067.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ïðñ") [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_241_'ns_1@10.1.2.33' - 241, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_241_'ns_1@10.1.2.35' - 241, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_241 - 241, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_241 - VBucket <241> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_242_'ns_1@10.1.2.35' - 242, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_242_'ns_1@10.1.2.33' - 242, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21079.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ò"}, {checkpoints,[{242,0}]}, {name,"rebalance_242"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21079.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 242 [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21079.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21071.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_242_'ns_1@10.1.2.35'">>, <<"replication_building_242_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21071.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21070.0>,shutdown} [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',242}] [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21096.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', {new_child_id, "ïðñ", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21096.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ïðñò") [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21101.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ó"}, {checkpoints,[{243,0}]}, {name,"rebalance_243"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21101.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 243 [rebalance:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21101.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_242 - 242, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_242 - VBucket <242> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_243_'ns_1@10.1.2.33' - 243, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_243_'ns_1@10.1.2.35' - 243, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_243 - 243, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_243 - VBucket <243> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21100.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_243_'ns_1@10.1.2.35'">>, <<"replication_building_243_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21100.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21099.0>,shutdown} [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',243}] [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21117.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', {new_child_id, "ïðñò", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:54] [ns_1@10.1.2.30:<0.21117.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ïðñòó") [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21123.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ô"}, {checkpoints,[{244,0}]}, {name,"rebalance_244"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21123.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 244 [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21123.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21121.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_244_'ns_1@10.1.2.35'">>, <<"replication_building_244_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21121.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21120.0>,shutdown} [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',244}] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21139.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', {new_child_id, "ïðñòó", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21139.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ïðñòóô") [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_244_'ns_1@10.1.2.35' - 244, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_244_'ns_1@10.1.2.33' - 244, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_244 - 244, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_244 - VBucket <244> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_245_'ns_1@10.1.2.35' - 245, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_245_'ns_1@10.1.2.33' - 245, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21144.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"õ"}, {checkpoints,[{245,0}]}, {name,"rebalance_245"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21144.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 245 [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21144.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21143.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_245_'ns_1@10.1.2.35'">>, <<"replication_building_245_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21143.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21142.0>,shutdown} [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',245}] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21160.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', {new_child_id, "ïðñòóô", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21160.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ïðñòóôõ") [views:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,228,229,230,231, 232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248, 249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21165.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ö"}, {checkpoints,[{246,0}]}, {name,"rebalance_246"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21165.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 246 [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21165.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:55] 
[ns_1@10.1.2.30:<0.21164.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_246_'ns_1@10.1.2.35'">>, <<"replication_building_246_'ns_1@10.1.2.33'">>] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21164.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21163.0>,shutdown} [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.33',246}] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_245 - 245, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_245 - VBucket <245> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_246_'ns_1@10.1.2.33' - 246, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_246_'ns_1@10.1.2.35' - 246, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_246 - 246, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_246 - VBucket <246> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21181.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', {new_child_id, "ïðñòóôõ", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21181.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.33', "ïðñòóôõö") [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21186.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"÷"}, {checkpoints,[{247,0}]}, {name,"rebalance_247"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21186.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 247 [rebalance:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21186.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21185.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_247_'ns_1@10.1.2.35'">>, <<"replication_building_247_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21185.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21184.0>,shutdown} [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',247}] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:<0.21203.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷") [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_247_'ns_1@10.1.2.34' - 247, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_247_'ns_1@10.1.2.35' - 247, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_247 - 247, 
ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_247 - VBucket <247> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream [views:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,229,230,231,232, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249, 250,251,252,253,254,255] Passive: [] Cleanup: "ä" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:55] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_248_'ns_1@10.1.2.34' - 248, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_248_'ns_1@10.1.2.35' - 248, ill is completed with VBuckets [rebalance:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21208.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ø"}, {checkpoints,[{248,0}]}, {name,"rebalance_248"}, {takeover,true}] [rebalance:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21208.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 248 [rebalance:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21208.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover [ns_server:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21206.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_248_'ns_1@10.1.2.35'">>, <<"replication_building_248_'ns_1@10.1.2.34'">>] [ns_server:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21206.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21205.0>,shutdown} [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,229,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,229,231,232,233, 
234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250, 251,252,253,254,255] Passive: [] Cleanup: "æ" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,231,232,233,234, 235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255] Passive: [] Cleanup: "å" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,231,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,231,233,234,235, 236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, 253,254,255] Passive: [] Cleanup: "è" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166] memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_248 - 248, ill is completed with VBuckets memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_248 - VBucket <248> is going dead to complete vbucket takeover. memcached<0.396.0>: TAP takeover is completed. 
Disconnecting tap stream [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,233,234,235,236, 237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, 254,255] Passive: [] Cleanup: "ç" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,233,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,233,235,236,237, 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255] Passive: [] Cleanup: "ê" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,233,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254, 255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,233,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "ë" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:56] 
[ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,236,237,238,239, 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "é" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [ns_server:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',248}] [ns_server:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21301.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷", 'ns_1@10.1.2.35'}) [ns_server:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21301.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷ø") [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,236,238,239,240, 241,242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,236,238,239,240, 241,242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "í" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: [] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}] [views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: 
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,238,239,240,241, 242,243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "ì" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,239,240,241,242, 243,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "î" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215, 216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25, 26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,240,241,242,243, 244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: "ï" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131, 132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216, 217,218,219,220,221] ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_249_'ns_1@10.1.2.34' - 249, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_249_'ns_1@10.1.2.35' - 249, Backfill is completed with VBuckets
[rebalance:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21343.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ù"}, {checkpoints,[{249,0}]}, {name,"rebalance_249"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21343.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 249
[rebalance:info] [2012-04-10 18:23:56] [ns_1@10.1.2.30:<0.21343.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21305.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_249_'ns_1@10.1.2.35'">>, <<"replication_building_249_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21305.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21304.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',249}]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21370.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷ø", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21370.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷øù")
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_249 - 249, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_249 - VBucket <249> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_250_'ns_1@10.1.2.35' - 250, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_250_'ns_1@10.1.2.34' - 250, Backfill is completed with VBuckets
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21374.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ú"}, {checkpoints,[{250,0}]}, {name,"rebalance_250"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21374.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 250
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21374.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21373.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_250_'ns_1@10.1.2.35'">>, <<"replication_building_250_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21373.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21372.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',250}]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21391.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷øù", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21391.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷øùú")
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21395.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"û"}, {checkpoints,[{251,0}]}, {name,"rebalance_251"}, {takeover,true}]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_250 - 250, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_250 - VBucket <250> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_251_'ns_1@10.1.2.35' - 251, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_251_'ns_1@10.1.2.34' - 251, Backfill is completed with VBuckets
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21395.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 251
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21395.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21394.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_251_'ns_1@10.1.2.35'">>, <<"replication_building_251_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21394.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21393.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',251}]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21412.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷øùú", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21412.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷øùúû")
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21416.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ü"}, {checkpoints,[{252,0}]}, {name,"rebalance_252"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21416.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 252
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21416.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21415.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_252_'ns_1@10.1.2.35'">>, <<"replication_building_252_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21415.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21414.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',252}]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_251 - 251, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_251 - VBucket <251> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_252_'ns_1@10.1.2.35' - 252, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_252_'ns_1@10.1.2.34' - 252, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_252 - 252, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_252 - VBucket <252> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21433.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷øùúû", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21433.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷øùúûü")
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21437.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ý"}, {checkpoints,[{253,0}]}, {name,"rebalance_253"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21437.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 253
[rebalance:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21437.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21436.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_253_'ns_1@10.1.2.35'">>, <<"replication_building_253_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21436.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21435.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',253}]
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21454.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷øùúûü", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:<0.21454.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷øùúûüý")
[ns_server:info] [2012-04-10 18:23:57] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_253_'ns_1@10.1.2.35' - 253, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_253_'ns_1@10.1.2.34' - 253, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_253 - 253, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_253 - VBucket <253> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_254_'ns_1@10.1.2.35' - 254, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_254_'ns_1@10.1.2.34' - 254, Backfill is completed with VBuckets
[rebalance:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21469.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"þ"}, {checkpoints,[{254,0}]}, {name,"rebalance_254"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21469.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 254
[rebalance:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21469.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21457.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_254_'ns_1@10.1.2.35'">>, <<"replication_building_254_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21457.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21456.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',254}]
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21486.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷øùúûüý", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21486.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷øùúûüýþ")
[rebalance:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21490.0>:ebucketmigrator_srv:init:167] Starting tap stream: [{vbuckets,"ÿ"}, {checkpoints,[{255,0}]}, {name,"rebalance_255"}, {takeover,true}]
[rebalance:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21490.0>:ebucketmigrator_srv:process_upstream:391] Initial stream for vbucket 255
[rebalance:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21490.0>:ebucketmigrator_srv:terminate:211] Skipping close ack for successfull takover
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21489.0>:ns_replicas_builder:kill_a_bunch_of_tap_names:207] Killed the following tap names on 'ns_1@10.1.2.30': [<<"replication_building_255_'ns_1@10.1.2.35'">>, <<"replication_building_255_'ns_1@10.1.2.34'">>]
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:<0.21489.0>:ns_replicas_builder:build_replicas_main:131] Got exit: {'EXIT',<0.21488.0>,shutdown}
[ns_server:info] [2012-04-10 18:23:58] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_254 - 254, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_254 - VBucket <254> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_255_'ns_1@10.1.2.35' - 255, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:replication_building_255_'ns_1@10.1.2.34' - 255, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_255 - 255, Backfill is completed with VBuckets
memcached<0.396.0>: TAP (Producer) eq_tapq:rebalance_255 - VBucket <255> is going dead to complete vbucket takeover.
memcached<0.396.0>: TAP takeover is completed. Disconnecting tap stream
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,244,245,246,247,248,249,250,251,252,253,254,255]}, {passive,[]}, {ignore,"ðñòó"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,244,245,246,247,248,249,250,251,252,253,254,255] Passive: [] Cleanup: [] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"ðñòóôõö÷øùúûüýþÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: [] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"ñòóôõö÷øùúûüýþÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ð" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[ns_server:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:<0.13357.0>:cb_gen_vbm_sup:apply_changes:100] Applying changes: [{add_replica,'ns_1@10.1.2.35','ns_1@10.1.2.34',255}]
[ns_server:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:<0.21600.0>:cb_gen_vbm_sup:set_node_replicas:405] kill_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', {new_child_id, "÷øùúûüýþ", 'ns_1@10.1.2.35'})
[ns_server:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:<0.21600.0>:cb_gen_vbm_sup:set_node_replicas:416] start_child(ns_vbm_new_sup, "default", 'ns_1@10.1.2.35', 'ns_1@10.1.2.34', "÷øùúûüýþÿ")
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"òóôõö÷øùúûüýþÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ñ" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óôõö÷øùúûüýþÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:00] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ò" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óôõ÷øùúûüýþÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ö" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óôõ÷ùúûüýþÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ø" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óô÷ùúûüýþÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "õ" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óô÷ùúûüýÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "þ" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óô÷ùûüýÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ú" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óô÷ûüýÿ"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ù" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óô÷ûüý"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ÿ" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"óô÷üý"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "û" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"ô÷üý"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ó" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"ô÷ý"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ü" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"ô÷"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ý" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,"÷"}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "ô" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:01] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: "÷" Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[user:info] [2012-04-10 18:24:02] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:233] Rebalance completed successfully.
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.18864.0>:couch_log:info:39] 10.1.2.49 - - PUT /default/_design/dev_test_view-ae8b05e 201
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] Calling couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, {set_view_params,256,[],[],true}])
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:couch_set_view:couch_log:info:39] couch_set_view spawned worker {<0.22222.0>,#Ref<0.0.1.190134>} to open set view group `_design/dev_test_view-ae8b05e`, set `default`, signature `9c2007099024ffc9a953e58ece0d6178`, new waiting list: [{<0.10204.0>,#Ref<0.0.1.190133>}]
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Started undefined main set view group `default`, group `_design/dev_test_view-ae8b05e`
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22222.0>:couch_log:info:39] couch_set_view opener worker <0.22222.0> for set view group `_design/dev_test_view-ae8b05e`, set `default`, signature `9c2007099024ffc9a953e58ece0d6178`, finishing with reply {ok, <0.22223.0>}
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, linked PID <0.22224.0> stopped normally
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:couch_set_view:couch_log:info:39] couch_set_view set view group `_design/dev_test_view-ae8b05e`, set `default`, signature `9c2007099024ffc9a953e58ece0d6178`, opener worker {#Ref<0.0.1.190134>,<0.22222.0>} finished. Replying with {ok,<0.22223.0>} to waiting list: [{<0.10204.0>,#Ref<0.0.1.190133>}]
[ns_server:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22240.0>:ns_port_sup:restart_port:134] restarting port: {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22235.0>:couch_log:info:39] Started undefined replica set view group `default`, group `_design/dev_test_view-ae8b05e`
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22235.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view-ae8b05e`, linked PID <0.22236.0> stopped normally
[ns_server:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.10070.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0
[ns_server:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.10070.0>:ns_port_server:log:166] moxi<0.10070.0>: EOL on stdin. Exiting
[error_logger:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,ns_port_sup} started: [{pid,<0.22248.0>}, {name, {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}}, {mfargs, {supervisor_cushion,start_link, [moxi,5000,ns_port_server,start_link, [moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env, [{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}]
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22235.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view-ae8b05e`, configured with: 256 partitions no replica support initial active partitions [] initial passive partitions []
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, configured with: 256 partitions replica support initial active partitions [] initial passive partitions []
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:define_group:373] couch_set_view:define_group([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, {set_view_params,256,[],[],true}]) returned ok in 28ms
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:393] Applying map to bucket default (ddoc _design/dev_test_view-ae8b05e): [{active,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42]}, {passive,[]}, {ignore,[]}, {replica,[43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]}]
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:399] Classified vbuckets for "default" (ddoc _design/dev_test_view-ae8b05e): Active: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive: [] Cleanup: [] Replica: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] ReplicaCleanup: []
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:418] Calling couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42], [],[]])
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, partition states updated active partitions before: [] active partitions after: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] passive partitions before: [] passive partitions after: [] cleanup partitions before: [] cleanup partitions after: [] replica partitions before: [] replica partitions after: [] replicas on transfer before: [] replicas on transfer after: []
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:419] couch_set_view:set_partition_states([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42], [],[]]) returned ok in 16ms
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:423] Calling couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]])
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22235.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view-ae8b05e`, partition states updated active partitions before: [] active partitions after: [] passive partitions before: [] passive partitions after: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] cleanup partitions before: [] cleanup partitions after: []
[couchdb:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, defined new replica partitions: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221] New full set of replica partitions is: [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:424] couch_set_view:add_replica_partitions([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, [43,44,45,46,47,48,49,50,51,86,87,88,89,90,91,92,93,94,129,130,131,132,133,134,135,136,172,173,174,175,176,177,178,179,180,214,215,216,217,218,219,220,221]]) returned ok in 15ms
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:425] Calling couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, []])
[views:info] [2012-04-10 18:24:14] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_ddoc_map:426] couch_set_view:remove_replica_partitions([<<"default">>, <<"_design/dev_test_view-ae8b05e">>, []]) returned ok in 0ms
[ns_server:info] [2012-04-10 18:24:15] [ns_1@10.1.2.30:<0.22249.0>:ns_port_server:log:166] moxi<0.22249.0>: 2012-04-10 18:24:14: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13) moxi<0.22249.0>: 2012-04-10 18:24:14: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
[ns_server:info] [2012-04-10 18:24:35] [ns_1@10.1.2.30:ns_config_rep:ns_config_rep:do_pull:258] Pulling config from: 'ns_1@10.1.2.35'
[couchdb:info] [2012-04-10 18:24:37] [ns_1@10.1.2.30:<0.22426.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_all_docs 200
[couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Starting updater for set view `default`, main group `_design/dev_test_view-ae8b05e`
[couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22570.0>:couch_log:info:39] Updater for set view `default`, main group `_design/dev_test_view-ae8b05e` started Active partitions: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive partitions: [] Active partitions update seqs: [{0,66},{1,68},{2,19},{3,13},{4,13},{5,19},{6,68},{7,66},{8,68},{9,66},{10,16},{11,21},{12,21},{13,16},{14,66},{15,68},{16,20},{17,16},{18,66},{19,66},{20,66},{21,66},{22,16},{23,20},{24,13},{25,19},{26,66},{27,66},{28,66},{29,66},{30,19},{31,13},{32,13},{33,20},{34,71},{35,71},{36,71},{37,71},{38,20},{39,13},{40,19},{41,13},{42,65}] Active partitions indexed update seqs: [{0,0},{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0},{19,0},{20,0},{21,0},{22,0},{23,0},{24,0},{25,0},{26,0},{27,0},{28,0},{29,0},{30,0},{31,0},{32,0},{33,0},{34,0},{35,0},{36,0},{37,0},{38,0},{39,0},{40,0},{41,0},{42,0}] Passive partitions update seqs: [] Passive partitions indexed update seqs: [] Active partitions # docs: [{0,56},{1,54},{2,19},{3,9},{4,9},{5,19},{6,54},{7,56},{8,54},{9,56},{10,8},{11,19},{12,19},{13,8},{14,56},{15,54},{16,20},{17,8},{18,56},{19,54},{20,54},{21,56},{22,8},{23,20},{24,9},{25,19},{26,54},{27,56},{28,56},{29,54},{30,19},{31,9},{32,9},{33,18},{34,61},{35,59},{36,59},{37,61},{38,18},{39,9},{40,19},{41,11},{42,65}] Active partitions # deleted
docs: [{0,1099511627771},{1,1099511627769},{2,0},{3,1099511627774},{4,1099511627774},{5,0},{6,1099511627769},{7,1099511627771},{8,1099511627769},{9,1099511627771},{10,1099511627772},{11,1099511627775},{12,1099511627775},{13,1099511627772},{14,1099511627771},{15,1099511627769},{16,0},{17,1099511627772},{18,1099511627771},{19,1099511627770},{20,1099511627770},{21,1099511627771},{22,1099511627772},{23,0},{24,1099511627774},{25,0},{26,1099511627770},{27,1099511627771},{28,1099511627771},{29,1099511627770},{30,0},{31,1099511627774},{32,1099511627774},{33,1099511627775},{34,1099511627771},{35,1099511627770},{36,1099511627770},{37,1099511627771},{38,1099511627775},{39,1099511627774},{40,0},{41,1099511627775},{42,0}] Passive partitions # docs: [] Passive partitions # deleted docs: [] Replicas to transfer: [] [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/0 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22479.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view-ae8b05e/_view/dev_test_view-ae8b05e?connection_timeout=60000&full_set=true 200 [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/1 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/2 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/3 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/4 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/5 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/6 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/7 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/8 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/9 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/10 to update main set view group 
`_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/11 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/12 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/13 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/14 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/15 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/16 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/17 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/18 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/19 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/20 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/21 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/22 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/23 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/24 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/25 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` 
[couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/26 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/27 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/28 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/29 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/30 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/31 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/32 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/33 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/34 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/35 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/36 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/37 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/38 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/39 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/40 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:39] 
[ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/41 to update main set view group `_design/dev_test_view-ae8b05e` from set `default`
[couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22576.0>:couch_log:info:39] Reading changes (since sequence 0) from active partition default/42 to update main set view group `_design/dev_test_view-ae8b05e` from set `default`
[couchdb:info] [2012-04-10 18:24:39] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, updater finished Indexing time: 0.679 seconds Blocked time: 0.000 seconds Inserted IDs: 1485 Deleted IDs: 0 Inserted KVs: 1485 Deleted KVs: 0 Cleaned KVs: 0
[ns_server:info] [2012-04-10 18:24:40] [ns_1@10.1.2.30:ns_config_rep:ns_config_rep:do_pull:258] Pulling config from: 'ns_1@10.1.2.35'
[couchdb:info] [2012-04-10 18:24:43] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, compaction starting
[couchdb:info] [2012-04-10 18:24:43] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, linked PID <0.22634.0> stopped normally
[couchdb:info] [2012-04-10 18:24:43] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, compaction complete in 0.092 seconds, filtered 0 key-value pairs
[couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Starting updater for set view `default`, main group `_design/dev_test_view-ae8b05e`
[couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22691.0>:couch_log:info:39] Updater for set view `default`, main group `_design/dev_test_view-ae8b05e` started Active partitions: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42] Passive partitions: [] Active partitions update seqs: [{0,67},{1,69},{2,19},{3,13},{4,13},{5,19},{6,69},{7,67},{8,69},{9,67},{10,16},{11,21},{12,21},{13,16},{14,67},{15,69},{16,21},{17,16},{18,67},{19,67},{20,67},{21,67},{22,16},{23,21},{24,13},{25,19},{26,67},{27,67},{28,67},{29,67},{30,19},{31,13},{32,13},{33,20},{34,73},{35,72},{36,72},{37,73},{38,20},{39,13},{40,19},{41,13},{42,72}] Active partitions indexed update seqs: [{0,66},{1,68},{2,19},{3,13},{4,13},{5,19},{6,68},{7,66},{8,68},{9,66},{10,16},{11,21},{12,21},{13,16},{14,66},{15,68},{16,20},{17,16},{18,66},{19,66},{20,66},{21,66},{22,16},{23,20},{24,13},{25,19},{26,66},{27,66},{28,66},{29,66},{30,19},{31,13},{32,13},{33,20},{34,71},{35,71},{36,71},{37,71},{38,20},{39,13},{40,19},{41,13},{42,71}] Passive partitions update seqs: [] Passive partitions indexed update seqs: [] Active partitions # docs: [{0,55},{1,53},{2,19},{3,9},{4,9},{5,19},{6,53},{7,55},{8,53},{9,55},{10,8},{11,19},{12,19},{13,8},{14,55},{15,53},{16,19},{17,8},{18,55},{19,53},{20,53},{21,55},{22,8},{23,19},{24,9},{25,19},{26,53},{27,55},{28,55},{29,53},{30,19},{31,9},{32,9},{33,18},{34,59},{35,58},{36,58},{37,59},{38,18},{39,9},{40,19},{41,11},{42,58}] Active partitions # deleted docs:
[{0,1099511627770},{1,1099511627768},{2,0},{3,1099511627774},{4,1099511627774},{5,0},{6,1099511627768},{7,1099511627770},{8,1099511627768},{9,1099511627770},{10,1099511627772},{11,1099511627775},{12,1099511627775},{13,1099511627772},{14,1099511627770},{15,1099511627768},{16,1099511627775},{17,1099511627772},{18,1099511627770},{19,1099511627769},{20,1099511627769},{21,1099511627770},{22,1099511627772},{23,1099511627775},{24,1099511627774},{25,0},{26,1099511627769},{27,1099511627770},{28,1099511627770},{29,1099511627769},{30,0},{31,1099511627774},{32,1099511627774},{33,1099511627775},{34,1099511627769},{35,1099511627769},{36,1099511627769},{37,1099511627769},{38,1099511627775},{39,1099511627774},{40,0},{41,1099511627775},{42,1099511627769}] Passive partitions # docs: [] Passive partitions # deleted docs: [] Replicas to transfer: [] [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/0 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22438.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view-ae8b05e/_view/dev_test_view-ae8b05e?connection_timeout=60000&full_set=true 200 [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 68) from active partition default/1 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 19) from active partition default/2 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 13) from active partition default/3 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 13) from active partition default/4 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 19) from active partition default/5 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 68) from active partition default/6 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/7 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 68) from active partition default/8 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/9 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 16) from active partition default/10 to update main 
set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 21) from active partition default/11 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 21) from active partition default/12 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 16) from active partition default/13 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/14 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 68) from active partition default/15 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 20) from active partition default/16 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 16) from active partition default/17 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/18 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/19 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/20 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/21 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 16) from active partition default/22 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 20) from active partition default/23 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 13) from active partition default/24 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 19) from active partition default/25 to update main set view group 
`_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/26 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/27 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/28 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 66) from active partition default/29 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 19) from active partition default/30 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 13) from active partition default/31 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 13) from active partition default/32 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 20) from active partition default/33 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 71) from active partition default/34 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 71) from active partition default/35 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 71) from active partition default/36 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 71) from active partition default/37 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 20) from active partition default/38 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 13) from active partition default/39 to update main set view group `_design/dev_test_view-ae8b05e` from set `default` [couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 19) from active partition default/40 to update main set view group `_design/dev_test_view-ae8b05e` from set 
`default`
[couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 13) from active partition default/41 to update main set view group `_design/dev_test_view-ae8b05e` from set `default`
[couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22697.0>:couch_log:info:39] Reading changes (since sequence 71) from active partition default/42 to update main set view group `_design/dev_test_view-ae8b05e` from set `default`
[couchdb:info] [2012-04-10 18:24:49] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, updater finished Indexing time: 0.138 seconds Blocked time: 0.000 seconds Inserted IDs: 0 Deleted IDs: 25 Inserted KVs: 0 Deleted KVs: 25 Cleaned KVs: 0
[couchdb:info] [2012-04-10 18:24:55] [ns_1@10.1.2.30:<0.22441.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view-ae8b05e/_view/dev_test_view-ae8b05e?connection_timeout=60000&full_set=true 200
[couchdb:info] [2012-04-10 18:24:57] [ns_1@10.1.2.30:<0.22557.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view-ae8b05e/_view/dev_test_view-ae8b05e?connection_timeout=60000&full_set=true 200
[couchdb:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.22449.0>:couch_log:info:39] 10.1.2.49 - - GET /default/_design/dev_test_view-ae8b05e 200
[couchdb:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.22796.0>:couch_log:info:39] 10.1.2.49 - - DELETE /default/_design/dev_test_view-ae8b05e 200
[couchdb:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.22223.0>:couch_log:info:39] Set view `default`, main group `_design/dev_test_view-ae8b05e`, terminating with reason: normal
[couchdb:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.22235.0>:couch_log:info:39] Set view `default`, replica group `_design/dev_test_view-ae8b05e`, terminating with reason: shutdown
[rebalance:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.20366.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[rebalance:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.19038.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[rebalance:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.17688.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[rebalance:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.16354.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[rebalance:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.14881.0>:ebucketmigrator_srv:do_confirm_sent_messages:243] Got close ack!
[user:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:terminate:350] Shutting down bucket "default" on 'ns_1@10.1.2.30' for deletion
[couchdb:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:<0.22625.0>:couch_log:info:39] Shutting down spatial group server, monitored db is closing.
[ns_server:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/master">>: ok
[couchdb:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:couch_set_view:couch_log:info:39] Deleting index files for set `default` because database partition `default/master` was deleted
[ns_server:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/0">>: ok
[ns_server:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:'ns_memcached-default':ns_storage_conf:delete_database:395] Deleting database <<"default/1">>: ok
[... 84 further identical "Deleting database <<"default/N">>: ok" entries elided, covering vbuckets 2-51, 86-94, 129-136, 172-180 and 214-221 ...]
[ns_server:info] [2012-04-10 18:25:12] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: Shutting down tap connections!
[ns_server:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:idle:345] Restarting moxi on nodes ['ns_1@10.1.2.30','ns_1@10.1.2.31','ns_1@10.1.2.32','ns_1@10.1.2.33','ns_1@10.1.2.34','ns_1@10.1.2.35']
[ns_server:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.23024.0>:ns_port_sup:restart_port:134] restarting port: {moxi,"/opt/couchbase/bin/moxi", ["-Z", "port_listen=11211,default_bucket_name=default,downstream_max=1024,downstream_conn_max=4,connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400,auth_timeout=100,cycle=200,downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200", "-z", "url=http://127.0.0.1:8091/pools/default/saslBucketsStreaming", "-p","0","-Y","y","-O","stderr",[]], [{env,[{"EVENT_NOSELECT","1"}, {"MOXI_SASL_PLAIN_USR","Administrator"}, {"MOXI_SASL_PLAIN_PWD","password"}]}, use_stdio,exit_status,port_server_send_eol, stderr_to_stdout,stream]}
[ns_server:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.22249.0>:ns_port_server:handle_info:104] Port server moxi exited with status 0
[ns_server:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.22249.0>:ns_port_server:log:166]
moxi<0.22249.0>: EOL on stdin. Exiting
[error_logger:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,ns_port_sup}
             started: [{pid,<0.23025.0>}, {name,{moxi,"/opt/couchbase/bin/moxi",[... the same argument list as in the restart_port entry above ...]}}, {mfargs,{supervisor_cushion,start_link,[moxi,5000,ns_port_server,start_link,[... the same moxi spec ...]]}}, {restart_type,permanent}, {shutdown,10000}, {child_type,worker}]
[menelaus:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.22548.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default"
[user:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:idle:371] Starting rebalance, KeepNodes = ['ns_1@10.1.2.30'], EjectNodes = ['ns_1@10.1.2.32','ns_1@10.1.2.34','ns_1@10.1.2.31','ns_1@10.1.2.35','ns_1@10.1.2.33']
[ns_server:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.32'} process down: shutdown
[user:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:233] Rebalance completed successfully.
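The "-Z" argument in the restart_port entry above packs moxi's proxy tunables into a single comma-separated key=value string. A throwaway Erlang sketch for reading such a string into a property list (illustrative only, not moxi's own option handling):

    -module(moxi_z).
    -export([parse/1]).

    %% parse("port_listen=11211,downstream_max=1024") ->
    %%   [{"port_listen","11211"}, {"downstream_max","1024"}]
    parse(ZArg) ->
        [begin
             [K, V] = string:tokens(Pair, "="),
             {K, V}
         end || Pair <- string:tokens(ZArg, ",")].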
[ns_server:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.31'} process down: shutdown
[ns_server:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.34'} process down: shutdown
[ns_server:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:xdc_rdoc_replication_srv:cb_generic_replication_srv:handle_info:114] Remote server node {xdc_rdoc_replication_srv,'ns_1@10.1.2.33'} process down: shutdown
[ns_server:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:ns_node_disco_events:ns_node_disco_log:handle_event:46] ns_node_disco_log: nodes changed: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:25:13] [ns_1@10.1.2.30:<0.23026.0>:ns_port_server:log:166]
moxi<0.23026.0>: 2012-04-10 18:25:13: (cproxy_config.c.317) env: MOXI_SASL_PLAIN_USR (13)
moxi<0.23026.0>: 2012-04-10 18:25:13: (cproxy_config.c.326) env: MOXI_SASL_PLAIN_PWD (8)
[user:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:151] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.34' went down.
[user:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:151] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.32' went down.
[user:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:151] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.35' went down.
[user:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:151] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.33' went down.
[ns_server:warn] [2012-04-10 18:25:13] [ns_1@10.1.2.30:mb_master:mb_master:master:319] Master got candidate heartbeat from node 'ns_1@10.1.2.31' which is not in peers ['ns_1@10.1.2.30']
[user:warn] [2012-04-10 18:25:14] [ns_1@10.1.2.30:ns_node_disco:ns_node_disco:handle_info:151] Node 'ns_1@10.1.2.30' saw that node 'ns_1@10.1.2.31' went down.
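The "(13)" and "(8)" printed by cproxy_config above appear to be simply the lengths of the credential strings handed to moxi through its environment in the restart entry earlier (note that the REST password itself is logged in clear text there), which a quick erl shell check matches:

    1> {length("Administrator"), length("password")}.
    {13,8}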
[menelaus:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:<0.23111.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase
[ns_server:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:<0.23194.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:25:41] [ns_1@10.1.2.30:<0.23112.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,ns_bucket_sup}
             started: [{pid,<0.23201.0>}, {name,{per_bucket_sup,"default"}}, {mfargs,{single_bucket_sup,start_link,["default"]}}, {restart_type,permanent}, {shutdown,infinity}, {child_type,supervisor}]
[ns_server:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:ensure_bucket:713] Created bucket "default" with config string "ht_size=3079;ht_locks=5;tap_noop_interval=20;max_txn_size=10000;max_size=1435500544;tap_keepalive=300;dbname=/opt/couchbase/var/lib/couchdb/default;allow_data_loss_during_shutdown=true;backend=couchdb;couch_bucket=default;couch_port=11213;max_vbuckets=256;alog_path=/opt/couchbase/var/lib/couchdb/default/access.log;vb0=false;waitforwarmup=false;failpartialwarmup=false;"
[error_logger:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
          supervisor: {local,'ns_memcached_sup-default'}
             started: [{pid,<0.23205.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}]
[... analogous PROGRESS REPORT entries elided: 'ns_memcached_sup-default' also started {ns_memcached,data,"default"} <0.23212.0>, {ns_vbm_sup,"default"} <0.23214.0>, {ns_vbm_new_sup,"default"} <0.23215.0>, {couch_stats_reader,"default"} <0.23216.0>, {stats_collector,"default"} <0.23217.0>, {stats_archiver,"default"} <0.23219.0>, {stats_reader,"default"} <0.23222.0> and {failover_safeness_level,"default"} <0.23223.0>; 'single_bucket_sup-default' started {ns_memcached_sup,"default"} <0.23203.0> and {capi_ddoc_replication_srv,"default"} <0.23224.0> ...]
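For scale, the ensure_bucket config string above can be sanity-checked in an erl shell: max_size is the bucket's RAM quota in bytes, and max_vbuckets=256 matches the vb 0..255 probes that follow (1 MiB = 1048576 bytes):

    %% max_size from the config string, expressed in MiB:
    1> 1435500544 div (1024*1024).
    1369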
<<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 7 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 8 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 9 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 10 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 11 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 12 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 13 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 14 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 15 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 16 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 17 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 18 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 19 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 20 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 21 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 22 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 23 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 24 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 25 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 26 in <<"default">>: 
{not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 27 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 28 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 29 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 30 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 31 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 32 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 33 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 34 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 35 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 36 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 37 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 38 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 39 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 40 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 41 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 42 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 43 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 44 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 45 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 46 in <<"default">>: 
{not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 47 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 48 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 49 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 50 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 51 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 52 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 53 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 54 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 55 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 56 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 57 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 58 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 59 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 60 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 61 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 62 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 63 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 64 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 65 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 66 in <<"default">>: 
{not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 67 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 68 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 69 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 70 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 71 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 72 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 73 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 74 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 75 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 76 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 77 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 78 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 79 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 80 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 81 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 82 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 83 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 84 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 85 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 86 in <<"default">>: 
{not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 87 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 88 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 89 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 90 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 91 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 92 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 93 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 94 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 95 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 96 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 97 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 98 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 99 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 100 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 101 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 102 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 103 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 104 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 105 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 106 in <<"default">>: 
{not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 107 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 108 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 109 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 110 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 111 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 112 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 113 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 114 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 115 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 116 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 117 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 118 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 119 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 120 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 121 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 122 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 123 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 124 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 125 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 126 in 
<<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 127 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 128 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 129 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 130 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 131 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 132 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 133 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 134 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 135 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 136 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 137 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 138 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 139 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 140 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 141 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 142 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 143 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 144 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 145 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 
146 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 147 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 148 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 149 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 150 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 151 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 152 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 153 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 154 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 155 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 156 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 157 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 158 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 159 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 160 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 161 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 162 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 163 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 164 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 165 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error 
opening vb 166 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 167 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 168 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 169 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 170 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 171 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 172 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 173 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 174 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 175 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 176 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 177 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 178 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 179 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 180 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 181 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 182 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 183 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 184 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 185 in <<"default">>: {not_found,no_db_file} [couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: 
Error opening vb 186 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 187 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 188 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 189 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 190 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 191 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 192 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 193 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 194 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 195 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 196 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 197 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 198 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 199 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 200 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 201 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 202 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 203 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 204 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 205 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 206 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 207 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 208 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 209 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 210 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 211 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 212 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 213 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 214 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 215 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 216 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 217 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 218 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 219 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 220 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 221 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 222 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 223 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 224 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 225 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 226 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 227 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 228 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 229 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 230 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 231 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 232 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 233 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 234 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 235 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 236 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 237 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 238 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 239 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 240 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 241 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 242 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 243 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 244 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 245 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 246 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 247 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 248 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 249 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 250 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 251 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 252 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 253 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 254 in <<"default">>: {not_found,no_db_file}
[couchdb:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':couch_log:error:42] MC daemon: Error opening vb 255 in <<"default">>: {not_found,no_db_file}
[views:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:445] Applying map to bucket default: [{active,[]},{passive,[]},{replica,[]},{ignore,[]}]
[views:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:'capi_set_view_manager-default':capi_set_view_manager:apply_map:450] Classified vbuckets for default: Active: [] Passive: [] Cleanup: [] Replica: [] ReplicaCleanup: []
[error_logger:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,'single_bucket_sup-default'}
  started: [{pid,<0.23238.0>}, {name,{capi_set_view_manager,"default"}}, {mfargs,{capi_set_view_manager,start_link,["default"]}}, {restart_type,permanent}, {shutdown,1000}, {child_type,worker}]
[ns_server:info] [2012-04-10 18:25:41] [ns_1@10.1.2.30:ns_port_memcached:ns_port_server:log:166]
memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.396.0>: Connected to mccouch: "localhost:11213"
memcached<0.396.0>: Trying to connect to mccouch: "localhost:11213"
memcached<0.396.0>: Connected to mccouch: "localhost:11213"
memcached<0.396.0>: Extension support isn't implemented in this version of bucket_engine
memcached<0.396.0>: Failed to load mutation log, falling back to key dump
memcached<0.396.0>: metadata loaded in 357 usec
memcached<0.396.0>: warmup completed in 517 usec
[ns_server:info] [2012-04-10 18:25:42] [ns_1@10.1.2.30:<0.23026.0>:ns_port_server:log:166]
moxi<0.23026.0>: 2012-04-10 18:25:43: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({
moxi<0.23026.0>: "name": "default",
moxi<0.23026.0>: "nodeLocator": "vbucket", moxi<0.23026.0>: "saslPassword": "", moxi<0.23026.0>: "nodes": [{ moxi<0.23026.0>: "couchApiBase": "http://10.1.2.30:8092/default", moxi<0.23026.0>: "replication": 0, moxi<0.23026.0>: "clusterMembership": "active", moxi<0.23026.0>: "status": "warmup", moxi<0.23026.0>: "thisNode": true, moxi<0.23026.0>: "hostname": "10.1.2.30:8091", moxi<0.23026.0>: "clusterCompatibility": 1, moxi<0.23026.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.23026.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.23026.0>: "ports": { moxi<0.23026.0>: "proxy": 11211, moxi<0.23026.0>: "direct": 11210 moxi<0.23026.0>: } moxi<0.23026.0>: }], moxi<0.23026.0>: "vBucketServerMap": { moxi<0.23026.0>: "hashAlgorithm": "CRC", moxi<0.23026.0>: "numReplicas": 1, moxi<0.23026.0>: "serverList": ["10.1.2.30:11210"], moxi<0.23026.0>: "vBucketMap": [] moxi<0.23026.0>: } moxi<0.23026.0>: }) [ns_server:info] [2012-04-10 18:25:42] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23194.0>} [ns_server:info] [2012-04-10 18:25:47] [ns_1@10.1.2.30:<0.23194.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:25:52] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23194.0>} [ns_server:info] [2012-04-10 18:25:53] [ns_1@10.1.2.30:<0.23194.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [ns_server:error] [2012-04-10 18:25:56] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:handle_call:140] call {stats,<<>>} took too long: 15042324 us [user:info] [2012-04-10 18:25:56] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:terminate:371] Control connection to memcached on 'ns_1@10.1.2.30' disconnected: {{badmatch, {error, timeout}}, [{mc_client_binary, stats_recv, 4, [{file, "src/mc_client_binary.erl"}, {line, 120}]}, {mc_client_binary, stats,4, [{file, "src/mc_client_binary.erl"}, {line, 305}]}, {ns_memcached, do_handle_call, 3, [{file, "src/ns_memcached.erl"}, {line, 269}]}, {ns_memcached, handle_call, 3, [{file, "src/ns_memcached.erl"}, {line, 134}]}, {gen_server, handle_msg, 5, [{file, "gen_server.erl"}, {line, 578}]}, {proc_lib, init_p_do_apply, 3, [{file, "proc_lib.erl"}, {line, 227}]}]} [ns_server:info] [2012-04-10 18:25:56] [ns_1@10.1.2.30:<0.23194.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:25:56] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_msg:76] ** Generic server 'ns_memcached-default' terminating ** Last message in was {stats,<<>>} ** When Server state == {state,{interval,#Ref<0.0.2.49823>}, init, {1334,107541,800859}, "default",#Port<0.24062>,stats} ** Reason for termination == ** {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4,[{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,do_handle_call,3, [{file,"src/ns_memcached.erl"},{line,269}]}, {ns_memcached,handle_call,3,[{file,"src/ns_memcached.erl"},{line,134}]}, {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,578}]}, {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} [error_logger:error] [2012-04-10 18:25:56] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: 
    initial call: ns_memcached:init/1
    pid: <0.23205.0>
    registered_name: 'ns_memcached-default'
    exception exit: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,do_handle_call,3, [{file,"src/ns_memcached.erl"},{line,269}]}, {ns_memcached,handle_call,3, [{file,"src/ns_memcached.erl"},{line,134}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,578}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}
      in function gen_server:terminate/6 (gen_server.erl, line 737)
    ancestors: ['ns_memcached_sup-default','single_bucket_sup-default', <0.23201.0>]
    messages: [{'$gen_call',{<0.361.0>,#Ref<0.0.2.52455>},connected}, check_started, {'$gen_call',{<0.23506.0>, {#Ref<0.0.2.52565>,'ns_1@10.1.2.30'}}, connected}, check_started, {'$gen_call',{<0.23509.0>,#Ref<0.0.2.52612>},topkeys}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.53048>},connected}, check_started,check_started,check_started, {'$gen_call',{<0.23542.0>, {#Ref<0.0.2.53217>,'ns_1@10.1.2.30'}}, connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.53683>},connected}, check_started,check_started,check_started,check_started, check_started, {'$gen_call',{<0.23578.0>, {#Ref<0.0.2.53934>,'ns_1@10.1.2.30'}}, connected}, check_started,check_started,check_started,check_started, check_started]
    links: [<0.56.0>,<0.23203.0>]
    dictionary: []
    trap_exit: true
    status: running
    heap_size: 75025
    stack_size: 24
    reductions: 12036
    neighbours:
[error_logger:error] [2012-04-10 18:25:56] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,'ns_memcached_sup-default'}
  Context: child_terminated
  Reason: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,do_handle_call,3, [{file,"src/ns_memcached.erl"},{line,269}]}, {ns_memcached,handle_call,3, [{file,"src/ns_memcached.erl"},{line,134}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,578}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}
  Offender: [{pid,<0.23205.0>}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}]
[error_logger:error] [2012-04-10 18:25:56] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {{{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,do_handle_call,3, [{file,"src/ns_memcached.erl"},{line,269}]}, {ns_memcached,handle_call,3, [{file,"src/ns_memcached.erl"},{line,134}]}, {gen_server,handle_msg,5, [{file,"gen_server.erl"},{line,578}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}, {gen_server,call,['ns_memcached-default',topkeys,30000]}}
  Offender: [{pid,<0.388.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:25:56] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23590.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:25:57] [ns_1@10.1.2.30:<0.23582.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:25:57] [ns_1@10.1.2.30:<0.23183.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:25:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.389.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:25:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23600.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:25:58] [ns_1@10.1.2.30:<0.23586.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:25:58] [ns_1@10.1.2.30:<0.23594.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:25:59] [ns_1@10.1.2.30:<0.23596.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:25:59] [ns_1@10.1.2.30:<0.23186.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:00] [ns_1@10.1.2.30:<0.23603.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:00] [ns_1@10.1.2.30:<0.23607.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:00] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23600.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:00] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23618.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:01] [ns_1@10.1.2.30:<0.23609.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:01] [ns_1@10.1.2.30:<0.23187.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:02] [ns_1@10.1.2.30:<0.18371.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:02] [ns_1@10.1.2.30:<0.23613.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:02] [ns_1@10.1.2.30:<0.23619.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:26:02] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23194.0>}
[stats:error] [2012-04-10 18:26:03] [ns_1@10.1.2.30:<0.23621.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:03] [ns_1@10.1.2.30:<0.23188.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:03] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23618.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:03] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23639.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:04] [ns_1@10.1.2.30:<0.23630.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:04] [ns_1@10.1.2.30:<0.23634.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:05] [ns_1@10.1.2.30:<0.23636.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:05] [ns_1@10.1.2.30:<0.23189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:06] [ns_1@10.1.2.30:<0.23644.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:06] [ns_1@10.1.2.30:<0.23646.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:06] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23639.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:06] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23660.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:07] [ns_1@10.1.2.30:<0.23648.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:07] [ns_1@10.1.2.30:<0.23190.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:08] [ns_1@10.1.2.30:<0.23654.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:08] [ns_1@10.1.2.30:<0.23661.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:09] [ns_1@10.1.2.30:<0.23663.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:09] [ns_1@10.1.2.30:<0.23196.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:09] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23660.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:09] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23676.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:10] [ns_1@10.1.2.30:<0.23669.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:10] [ns_1@10.1.2.30:<0.23671.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:11] [ns_1@10.1.2.30:<0.23673.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:11] [ns_1@10.1.2.30:<0.23513.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:11] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,'ns_memcached_sup-default'}
  Context: start_error
  Reason: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}
  Offender: [{pid,undefined}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}]
[ns_server:warn] [2012-04-10 18:26:11] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:218] Janitor run exited for bucket "default" with reason {{{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}, {gen_server,call, [{'ns_memcached-default','ns_1@10.1.2.30'}, list_vbuckets_prevstate, 30000]}}
[error_logger:error] [2012-04-10 18:26:11] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================CRASH REPORT=========================
  crasher:
    initial call: ns_memcached:init/1
    pid: <0.23593.0>
    registered_name: []
    exception exit: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}
      in function gen_server:init_it/6 (gen_server.erl, line 321)
    ancestors: ['ns_memcached_sup-default','single_bucket_sup-default', <0.23201.0>]
    messages: [{'$gen_call',{<0.361.0>,#Ref<0.0.2.54237>},connected}, check_started, {'$gen_call',{<0.23598.0>, {#Ref<0.0.2.54331>,'ns_1@10.1.2.30'}}, connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, {'$gen_call',{<0.23623.0>,#Ref<0.0.2.54711>},topkeys}, check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.54734>},connected}, check_started, {'$gen_call',{<0.23194.0>,#Ref<0.0.2.54909>}, list_vbuckets_prevstate}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.55371>},connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, check_started,check_started]
    links: [<0.56.0>,<0.23203.0>,#Port<0.24107>]
    dictionary: []
    trap_exit: true
    status: running
    heap_size: 75025
    stack_size: 24
    reductions: 6910
    neighbours:
[error_logger:error] [2012-04-10 18:26:11] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================CRASH REPORT=========================
  crasher:
    initial call: ns_janitor:cleanup/2
    pid: <0.23194.0>
    registered_name: []
    exception exit: {{{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}, {gen_server,call, [{'ns_memcached-default','ns_1@10.1.2.30'}, list_vbuckets_prevstate,30000]}}
      in function gen_server:call/3 (gen_server.erl, line 188)
      in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 (src/ns_janitor_map_recoverer.erl, line 40)
      in call from ns_janitor_map_recoverer:read_existing_map/4 (src/ns_janitor_map_recoverer.erl, line 39)
      in call from ns_janitor:do_cleanup/3 (src/ns_janitor.erl, line 46)
    ancestors: [<0.345.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.59.0>]
    messages: []
    links: [<0.345.0>]
    dictionary: []
    trap_exit: false
    status: running
    heap_size: 75025
    stack_size: 24
    reductions: 7383
    neighbours:
[error_logger:error] [2012-04-10 18:26:11] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {{{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}, {gen_server,call,['ns_memcached-default',topkeys,30000]}}
  Offender: [{pid,<0.23590.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:11] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23688.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:12] [ns_1@10.1.2.30:<0.23681.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:12] [ns_1@10.1.2.30:<0.23683.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:12] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23676.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:12] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23702.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:13] [ns_1@10.1.2.30:<0.23685.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:13] [ns_1@10.1.2.30:<0.23523.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:14] [ns_1@10.1.2.30:<0.23699.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:14] [ns_1@10.1.2.30:<0.23703.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:15] [ns_1@10.1.2.30:<0.23705.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:15] [ns_1@10.1.2.30:<0.23536.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:15] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23702.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:15] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23718.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:16] [ns_1@10.1.2.30:<0.23711.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:16] [ns_1@10.1.2.30:<0.23713.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:17] [ns_1@10.1.2.30:<0.23715.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:26:17] [ns_1@10.1.2.30:<0.23696.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:17] [ns_1@10.1.2.30:<0.23548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:18] [ns_1@10.1.2.30:<0.23723.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:18] [ns_1@10.1.2.30:<0.23728.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:18] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23718.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:18] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23740.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:19] [ns_1@10.1.2.30:<0.23730.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:19] [ns_1@10.1.2.30:<0.23601.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:20] [ns_1@10.1.2.30:<0.23737.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:20] [ns_1@10.1.2.30:<0.23741.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:21] [ns_1@10.1.2.30:<0.23743.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:21] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23740.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:21] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23757.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:21] [ns_1@10.1.2.30:<0.23558.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:22] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:22] [ns_1@10.1.2.30:<0.23749.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:26:22] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23696.0>}
[stats:error] [2012-04-10 18:26:23] [ns_1@10.1.2.30:<0.23751.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:23] [ns_1@10.1.2.30:<0.23758.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:26:23] [ns_1@10.1.2.30:<0.23696.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:24] [ns_1@10.1.2.30:<0.23584.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:24] [ns_1@10.1.2.30:<0.23766.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:24] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23757.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:24] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23778.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:25] [ns_1@10.1.2.30:<0.23768.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:25] [ns_1@10.1.2.30:<0.23770.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:26] [ns_1@10.1.2.30:<0.23572.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:26] [ns_1@10.1.2.30:<0.23779.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:26] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,'ns_memcached_sup-default'}
  Context: start_error
  Reason: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}
  Offender: [{pid,undefined}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}]
[ns_server:info] [2012-04-10 18:26:26] [ns_1@10.1.2.30:<0.23696.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:26] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================CRASH REPORT=========================
  crasher:
    initial call: ns_memcached:init/1
    pid: <0.23687.0>
    registered_name: []
    exception exit: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}
      in function gen_server:init_it/6 (gen_server.erl, line 321)
    ancestors: ['ns_memcached_sup-default','single_bucket_sup-default', <0.23201.0>]
    messages: [{'$gen_call',{<0.361.0>,#Ref<0.0.2.55886>},connected}, check_started, {'$gen_call',{<0.23698.0>, {#Ref<0.0.2.55969>,'ns_1@10.1.2.30'}}, connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, {'$gen_call',{<0.23725.0>,#Ref<0.0.2.56387>},topkeys}, check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.56409>},connected}, check_started,check_started,check_started, {'$gen_call',{<0.23736.0>, {#Ref<0.0.2.56547>,'ns_1@10.1.2.30'}}, connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.56925>},connected}, check_started,check_started,check_started,check_started, check_started, {'$gen_call',{<0.23776.0>, {#Ref<0.0.2.57286>,'ns_1@10.1.2.30'}}, connected}, check_started,check_started,check_started,check_started, check_started]
    links: [<0.56.0>,<0.23203.0>,#Port<0.24139>]
    dictionary: []
    trap_exit: true
    status: running
    heap_size: 75025
    stack_size: 24
    reductions: 6913
    neighbours:
[error_logger:error] [2012-04-10 18:26:26] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {{{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}, {gen_server,call,['ns_memcached-default',topkeys,30000]}}
  Offender: [{pid,<0.23688.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:26] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23790.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:27] [ns_1@10.1.2.30:<0.23781.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:27] [ns_1@10.1.2.30:<0.23783.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:27] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23778.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:27] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23799.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:28] [ns_1@10.1.2.30:<0.23611.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:28] [ns_1@10.1.2.30:<0.23793.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:29] [ns_1@10.1.2.30:<0.23795.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:29] [ns_1@10.1.2.30:<0.23800.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:30] [ns_1@10.1.2.30:<0.23628.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:30] [ns_1@10.1.2.30:<0.23806.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:30] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23799.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:30] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23817.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:31] [ns_1@10.1.2.30:<0.23808.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:31] [ns_1@10.1.2.30:<0.23810.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:32] [ns_1@10.1.2.30:<0.23640.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:32] [ns_1@10.1.2.30:<0.23818.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:26:32] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23696.0>}
[ns_server:info] [2012-04-10 18:26:32] [ns_1@10.1.2.30:<0.23696.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:33] [ns_1@10.1.2.30:<0.23820.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:33] [ns_1@10.1.2.30:<0.23825.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:33] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23817.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:33] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23837.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:34] [ns_1@10.1.2.30:<0.23650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:34] [ns_1@10.1.2.30:<0.23831.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:35] [ns_1@10.1.2.30:<0.23833.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:35] [ns_1@10.1.2.30:<0.23838.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:36] [ns_1@10.1.2.30:<0.23665.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:36] [ns_1@10.1.2.30:<0.23844.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23837.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:36] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23858.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:37] [ns_1@10.1.2.30:<0.23846.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:37] [ns_1@10.1.2.30:<0.23848.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:38] [ns_1@10.1.2.30:<0.23677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:38] [ns_1@10.1.2.30:<0.23859.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:39] [ns_1@10.1.2.30:<0.23861.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:39] [ns_1@10.1.2.30:<0.23863.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:39] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23858.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:39] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23874.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:40] [ns_1@10.1.2.30:<0.23691.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:40] [ns_1@10.1.2.30:<0.23869.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:41] [ns_1@10.1.2.30:<0.23871.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:41] [ns_1@10.1.2.30:<0.23875.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:warn] [2012-04-10 18:26:41] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying.
[stats:error] [2012-04-10 18:26:42] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:26:42] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23696.0>}
[error_logger:error] [2012-04-10 18:26:42] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23874.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:42] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23892.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:error] [2012-04-10 18:26:45] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23892.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:45] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23896.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:error] [2012-04-10 18:26:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23896.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:48] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23904.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:51] [ns_1@10.1.2.30:<0.23707.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:51] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23904.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:51] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23913.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:52] [ns_1@10.1.2.30:<0.23719.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:26:52] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23696.0>}
[stats:error] [2012-04-10 18:26:52] [ns_1@10.1.2.30:<0.23881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:53] [ns_1@10.1.2.30:<0.23883.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:53] [ns_1@10.1.2.30:<0.23914.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:54] [ns_1@10.1.2.30:<0.23732.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:54] [ns_1@10.1.2.30:<0.23920.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:54] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context: child_terminated
  Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}}
  Offender: [{pid,<0.23913.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[error_logger:info] [2012-04-10 18:26:54] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started: [{pid,<0.23931.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}]
[stats:error] [2012-04-10 18:26:55] [ns_1@10.1.2.30:<0.23907.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:55] [ns_1@10.1.2.30:<0.23924.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:56] [ns_1@10.1.2.30:<0.23745.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:56] [ns_1@10.1.2.30:<0.23932.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:57] [ns_1@10.1.2.30:<0.23922.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:26:57] [ns_1@10.1.2.30:<0.23936.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:26:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================CRASH REPORT=========================
  crasher:
initial call: ns_memcached:init/1 pid: <0.23789.0> registered_name: [] exception exit: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} in function gen_server:init_it/6 (gen_server.erl, line 321) ancestors: ['ns_memcached_sup-default','single_bucket_sup-default', <0.23201.0>] messages: [{'$gen_call',{<0.361.0>,#Ref<0.0.2.57544>},connected}, {'$gen_call',{<0.23797.0>, {#Ref<0.0.2.57613>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.23822.0>,#Ref<0.0.2.57991>},topkeys}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.58013>},connected}, {'$gen_call',{<0.23835.0>, {#Ref<0.0.2.58190>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.58587>},connected}, {'$gen_call',{<0.23696.0>,#Ref<0.0.2.58792>}, list_vbuckets_prevstate}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.59072>},connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.59391>},connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, check_started,check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.59691>},connected}, check_started,check_started,check_started,check_started, check_started,check_started,check_started,check_started, check_started,check_started, {'$gen_call',{<0.361.0>,#Ref<0.0.2.60187>},connected}, check_started,check_started] links: [#Port<0.24203>,<0.23203.0>,<0.56.0>,#Port<0.24171>] dictionary: [] trap_exit: true status: running heap_size: 75025 stack_size: 24 reductions: 7979 neighbours: [error_logger:error] [2012-04-10 18:26:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {{{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"}, {line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"}, {line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}, {gen_server,call,['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.23790.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:26:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23948.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-04-10 18:26:57] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:218] Janitor run exited for bucket "default" with reason {{{badmatch, {error,timeout}}, [{mc_client_binary, stats_recv,4, [{file, "src/mc_client_binary.erl"}, {line,120}]}, 
{mc_client_binary, stats,4, [{file, "src/mc_client_binary.erl"}, {line,305}]}, {ns_memcached, ensure_bucket_config, 4, [{file, "src/ns_memcached.erl"}, {line,743}]}, {ns_memcached,init,1, [{file, "src/ns_memcached.erl"}, {line,112}]}, {gen_server,init_it,6, [{file, "gen_server.erl"}, {line,297}]}, {proc_lib, init_p_do_apply,3, [{file,"proc_lib.erl"}, {line,227}]}]}, {gen_server,call, [{'ns_memcached-default', 'ns_1@10.1.2.30'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-04-10 18:26:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23696.0> registered_name: [] exception exit: {{{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"},{line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"},{line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6, [{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]}, {gen_server,call, [{'ns_memcached-default','ns_1@10.1.2.30'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 (gen_server.erl, line 188) in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 (src/ns_janitor_map_recoverer.erl, line 40) in call from ns_janitor_map_recoverer:read_existing_map/4 (src/ns_janitor_map_recoverer.erl, line 39) in call from ns_janitor:do_cleanup/3 (src/ns_janitor.erl, line 46) ancestors: [<0.345.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.59.0>] messages: [] links: [<0.345.0>] dictionary: [] trap_exit: false status: running heap_size: 75025 stack_size: 24 reductions: 7383 neighbours: [error_logger:error] [2012-04-10 18:26:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'ns_memcached_sup-default'} Context: start_error Reason: {{badmatch,{error,timeout}}, [{mc_client_binary,stats_recv,4, [{file,"src/mc_client_binary.erl"}, {line,120}]}, {mc_client_binary,stats,4, [{file,"src/mc_client_binary.erl"}, {line,305}]}, {ns_memcached,ensure_bucket_config,4, [{file,"src/ns_memcached.erl"},{line,743}]}, {ns_memcached,init,1, [{file,"src/ns_memcached.erl"},{line,112}]}, {gen_server,init_it,6,[{file,"gen_server.erl"},{line,297}]}, {proc_lib,init_p_do_apply,3, [{file,"proc_lib.erl"},{line,227}]}]} Offender: [{pid,undefined}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:26:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.23931.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:26:57] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23953.0>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:26:58] [ns_1@10.1.2.30:<0.23762.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:26:58] [ns_1@10.1.2.30:<0.23944.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:26:59] [ns_1@10.1.2.30:<0.23934.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:26:59] [ns_1@10.1.2.30:<0.23954.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:00] [ns_1@10.1.2.30:<0.23772.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:00] [ns_1@10.1.2.30:<0.23960.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:00] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.23953.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:00] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23971.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:01] [ns_1@10.1.2.30:<0.23946.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:02] [ns_1@10.1.2.30:<0.23964.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:02] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:02] [ns_1@10.1.2.30:<0.23785.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:03] [ns_1@10.1.2.30:<0.23958.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:03] [ns_1@10.1.2.30:<0.23962.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:03] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.23971.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:03] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.23994.0>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:04] [ns_1@10.1.2.30:<0.23978.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:04] [ns_1@10.1.2.30:<0.23802.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:05] [ns_1@10.1.2.30:<0.23972.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:05] [ns_1@10.1.2.30:<0.23974.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:06] [ns_1@10.1.2.30:<0.23997.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:06] [ns_1@10.1.2.30:<0.23812.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:06] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.23994.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:06] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24012.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:07] [ns_1@10.1.2.30:<0.23991.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:07] [ns_1@10.1.2.30:<0.23995.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:27:07] [ns_1@10.1.2.30:<0.23982.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:08] [ns_1@10.1.2.30:<0.24007.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:08] [ns_1@10.1.2.30:<0.23827.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:09] [ns_1@10.1.2.30:<0.24003.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:09] [ns_1@10.1.2.30:<0.24005.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:09] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24012.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:09] 
[ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24031.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:10] [ns_1@10.1.2.30:<0.24021.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:10] [ns_1@10.1.2.30:<0.23842.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:11] [ns_1@10.1.2.30:<0.24015.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:11] [ns_1@10.1.2.30:<0.24019.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:12] [ns_1@10.1.2.30:<0.24036.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:27:12] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23982.0>} [stats:error] [2012-04-10 18:27:12] [ns_1@10.1.2.30:<0.23852.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:warn] [2012-04-10 18:27:12] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying. [error_logger:error] [2012-04-10 18:27:12] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24031.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:12] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24050.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:13] [ns_1@10.1.2.30:<0.24028.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:27:13] [ns_1@10.1.2.30:<0.23982.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:13] [ns_1@10.1.2.30:<0.24032.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:14] [ns_1@10.1.2.30:<0.24047.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:14] [ns_1@10.1.2.30:<0.23879.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:15] [ns_1@10.1.2.30:<0.24040.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:15] [ns_1@10.1.2.30:<0.24042.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:15] 
[ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24050.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:15] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24069.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:16] [ns_1@10.1.2.30:<0.24062.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:16] [ns_1@10.1.2.30:<0.23918.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:17] [ns_1@10.1.2.30:<0.24055.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:17] [ns_1@10.1.2.30:<0.24057.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:18] [ns_1@10.1.2.30:<0.23867.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:18] [ns_1@10.1.2.30:<0.23989.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:18] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24069.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:18] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24090.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:19] [ns_1@10.1.2.30:<0.24066.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:27:19] [ns_1@10.1.2.30:<0.23982.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:19] [ns_1@10.1.2.30:<0.24070.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:20] [ns_1@10.1.2.30:<0.24074.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:21] [ns_1@10.1.2.30:<0.23968.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:21] [ns_1@10.1.2.30:<0.24078.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:21] 
[ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24090.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:21] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24105.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:22] [ns_1@10.1.2.30:<0.24083.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:22] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:22] [ns_1@10.1.2.30:<0.23928.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:27:22] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.23982.0>} [stats:error] [2012-04-10 18:27:23] [ns_1@10.1.2.30:<0.24001.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:23] [ns_1@10.1.2.30:<0.24093.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:24] [ns_1@10.1.2.30:<0.24095.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:24] [ns_1@10.1.2.30:<0.23940.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:27:24] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24105.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:27:24] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24127.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:27:25] [ns_1@10.1.2.30:<0.24013.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:25] [ns_1@10.1.2.30:<0.24106.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:27:25] [ns_1@10.1.2.30:<0.23982.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:27:26] [ns_1@10.1.2.30:<0.24110.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] 
[stats:error] [2012-04-10 18:27:26] [ns_1@10.1.2.30:<0.24087.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:27] [ns_1@10.1.2.30:<0.24038.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:27] [ns_1@10.1.2.30:<0.24120.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:27] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24127.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:28] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24144.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:28] [ns_1@10.1.2.30:<0.24122.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:28] [ns_1@10.1.2.30:<0.24100.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:warn] [2012-04-10 18:27:28] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying.
[stats:error] [2012-04-10 18:27:29] [ns_1@10.1.2.30:<0.24064.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:29] [ns_1@10.1.2.30:<0.24132.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:30] [ns_1@10.1.2.30:<0.24134.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:30] [ns_1@10.1.2.30:<0.24114.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:31] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24144.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:31] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24164.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:31] [ns_1@10.1.2.30:<0.24076.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:31] [ns_1@10.1.2.30:<0.24147.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:32] [ns_1@10.1.2.30:<0.24151.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:27:32] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state,["default"],<0.23982.0>}
[stats:error] [2012-04-10 18:27:32] [ns_1@10.1.2.30:<0.24128.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:32] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {timeout,{gen_server,call,['ns_memcached-default',topkeys,30000]}}
  Offender:   [{pid,<0.23948.0>},{name,hot_keys_keeper},{mfargs,{hot_keys_keeper,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:32] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24175.0>},{name,hot_keys_keeper},{mfargs,{hot_keys_keeper,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:33] [ns_1@10.1.2.30:<0.24091.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:33] [ns_1@10.1.2.30:<0.24157.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:34] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24164.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:34] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24184.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:34] [ns_1@10.1.2.30:<0.24161.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:34] [ns_1@10.1.2.30:<0.24026.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:35] [ns_1@10.1.2.30:<0.24102.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:35] [ns_1@10.1.2.30:<0.24169.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:36] [ns_1@10.1.2.30:<0.24173.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:37] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24184.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:37] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24201.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:37] [ns_1@10.1.2.30:<0.24053.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:37] [ns_1@10.1.2.30:<0.24130.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:38] [ns_1@10.1.2.30:<0.24185.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:38] [ns_1@10.1.2.30:<0.24189.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:39] [ns_1@10.1.2.30:<0.24139.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:39] [ns_1@10.1.2.30:<0.24141.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:40] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24201.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:40] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24219.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:40] [ns_1@10.1.2.30:<0.24195.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:40] [ns_1@10.1.2.30:<0.24200.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:41] [ns_1@10.1.2.30:<0.24153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:41] [ns_1@10.1.2.30:<0.24155.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:42] [ns_1@10.1.2.30:<0.24210.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:42] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:27:42] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state,["default"],<0.23982.0>}
[stats:error] [2012-04-10 18:27:42] [ns_1@10.1.2.30:<0.24214.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:43] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24219.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:43] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24239.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:43] [ns_1@10.1.2.30:<0.24165.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:43] [ns_1@10.1.2.30:<0.24167.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:44] [ns_1@10.1.2.30:<0.24222.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:44] [ns_1@10.1.2.30:<0.24226.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:warn] [2012-04-10 18:27:44] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying.
[stats:error] [2012-04-10 18:27:45] [ns_1@10.1.2.30:<0.24191.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:45] [ns_1@10.1.2.30:<0.24193.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:46] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24239.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:46] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24257.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:46] [ns_1@10.1.2.30:<0.24236.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:46] [ns_1@10.1.2.30:<0.24240.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:47] [ns_1@10.1.2.30:<0.24118.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:47] [ns_1@10.1.2.30:<0.24206.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:48] [ns_1@10.1.2.30:<0.24250.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:48] [ns_1@10.1.2.30:<0.24252.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:49] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24257.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:49] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24278.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:49] [ns_1@10.1.2.30:<0.24203.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:49] [ns_1@10.1.2.30:<0.24220.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:50] [ns_1@10.1.2.30:<0.24262.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:50] [ns_1@10.1.2.30:<0.24179.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:51] [ns_1@10.1.2.30:<0.24216.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:51] [ns_1@10.1.2.30:<0.24230.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:51] [ns_1@10.1.2.30:<0.24246.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:51] [ns_1@10.1.2.30:<0.24258.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24278.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24298.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:52] [ns_1@10.1.2.30:<0.24275.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:27:52] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state,["default"],<0.23982.0>}
[stats:error] [2012-04-10 18:27:53] [ns_1@10.1.2.30:<0.24264.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:53] [ns_1@10.1.2.30:<0.24228.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:54] [ns_1@10.1.2.30:<0.24268.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:54] [ns_1@10.1.2.30:<0.24287.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:55] [ns_1@10.1.2.30:<0.24279.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:55] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24298.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:55] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24318.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:55] [ns_1@10.1.2.30:<0.24244.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:56] [ns_1@10.1.2.30:<0.24283.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:56] [ns_1@10.1.2.30:<0.24303.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:57] [ns_1@10.1.2.30:<0.24289.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:57] [ns_1@10.1.2.30:<0.24181.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:27:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
  Supervisor: {local,menelaus_sup}
  Context:    child_terminated
  Reason:     {noproc,{gen_server,call,[{'stats_reader-default','ns_1@10.1.2.30'},{latest,minute,1}]}}
  Offender:   [{pid,<0.24318.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[error_logger:info] [2012-04-10 18:27:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
  supervisor: {local,menelaus_sup}
  started:    [{pid,<0.24332.0>},{name,menelaus_web_alerts_srv},{mfargs,{menelaus_web_alerts_srv,start_link,[]}},{restart_type,permanent},{shutdown,5000},{child_type,worker}]
[stats:error] [2012-04-10 18:27:58] [ns_1@10.1.2.30:<0.24299.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:58] [ns_1@10.1.2.30:<0.24315.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:59] [ns_1@10.1.2.30:<0.24305.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:27:59] [ns_1@10.1.2.30:<0.24254.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:28:00] [ns_1@10.1.2.30:<0.24311.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:28:00] [ns_1@10.1.2.30:<0.24327.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:warn] [2012-04-10 18:28:00] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying.
[error_logger:error] [2012-04-10 18:28:01] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24332.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:01] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24352.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:01] [ns_1@10.1.2.30:<0.24319.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:01] [ns_1@10.1.2.30:<0.24266.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:warn] [2012-04-10 18:28:01] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:218] Janitor run exited for bucket "default" with reason {timeout, {gen_server,call, [{'ns_memcached-default', 'ns_1@10.1.2.30'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-04-10 18:28:01] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.23982.0> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@10.1.2.30'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 (gen_server.erl, line 188) in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 (src/ns_janitor_map_recoverer.erl, line 40) in call from ns_janitor_map_recoverer:read_existing_map/4 (src/ns_janitor_map_recoverer.erl, line 39) in call from ns_janitor:do_cleanup/3 (src/ns_janitor.erl, line 46) ancestors: [<0.345.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.59.0>] messages: [] links: [<0.345.0>] dictionary: [] trap_exit: false status: running heap_size: 75025 stack_size: 24 reductions: 7385 neighbours: [stats:error] [2012-04-10 18:28:02] [ns_1@10.1.2.30:<0.24323.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:02] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:02] [ns_1@10.1.2.30:<0.24341.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:03] [ns_1@10.1.2.30:<0.24329.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:03] [ns_1@10.1.2.30:<0.24281.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:04] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24352.0>}, {name,menelaus_web_alerts_srv}, 
{mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:04] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24376.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:04] [ns_1@10.1.2.30:<0.24337.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:04] [ns_1@10.1.2.30:<0.24353.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:05] [ns_1@10.1.2.30:<0.24343.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:05] [ns_1@10.1.2.30:<0.24291.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:06] [ns_1@10.1.2.30:<0.24347.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:06] [ns_1@10.1.2.30:<0.24368.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24376.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24394.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:07] [ns_1@10.1.2.30:<0.24355.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:07] [ns_1@10.1.2.30:<0.24365.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:07] [ns_1@10.1.2.30:<0.24293.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24175.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24399.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, 
{child_type,worker}] [stats:error] [2012-04-10 18:28:08] [ns_1@10.1.2.30:<0.24363.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:08] [ns_1@10.1.2.30:<0.24383.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:09] [ns_1@10.1.2.30:<0.24373.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:09] [ns_1@10.1.2.30:<0.24295.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24394.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24414.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:10] [ns_1@10.1.2.30:<0.24381.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:10] [ns_1@10.1.2.30:<0.24395.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:11] [ns_1@10.1.2.30:<0.24385.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:11] [ns_1@10.1.2.30:<0.24309.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:12] [ns_1@10.1.2.30:<0.24391.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:12] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24365.0>} [stats:error] [2012-04-10 18:28:12] [ns_1@10.1.2.30:<0.24409.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24414.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24434.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:13] 
[ns_1@10.1.2.30:<0.24397.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:13] [ns_1@10.1.2.30:<0.24365.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:13] [ns_1@10.1.2.30:<0.24321.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:14] [ns_1@10.1.2.30:<0.24407.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:14] [ns_1@10.1.2.30:<0.24421.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:15] [ns_1@10.1.2.30:<0.24411.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:15] [ns_1@10.1.2.30:<0.24333.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:16] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24434.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:16] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24453.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:16] [ns_1@10.1.2.30:<0.24419.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:warn] [2012-04-10 18:28:16] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying. 
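
The repeating menelaus_sup SUPERVISOR REPORT / PROGRESS REPORT pairs above are a single crash-restart loop: menelaus_web_alerts_srv polls per-bucket stats with a gen_server:call to the registered name {'stats_reader-default', Node}, and while that stats_reader process is down the name is unregistered, so the call exits the caller with {noproc, ...}. A minimal sketch of that stock OTP behaviour (editor's illustration, not ns_server source; the shell output shape is approximate):

    %% Calling a gen_server by a registered name that does not currently
    %% exist exits the *caller* with {noproc, {gen_server, call, Args}}:
    1> gen_server:call({'stats_reader-default', node()}, {latest, minute, 1}).
    ** exception exit: {noproc,
                        {gen_server,call,
                         [{'stats_reader-default',nonode@nohost},
                          {latest,minute,1}]}}

Because the child is restart_type permanent, menelaus_sup restarts it after each exit, which is why a PROGRESS REPORT follows every SUPERVISOR REPORT a few seconds later.
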
[stats:error] [2012-04-10 18:28:17] [ns_1@10.1.2.30:<0.24435.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:17] [ns_1@10.1.2.30:<0.24423.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:17] [ns_1@10.1.2.30:<0.24365.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:17] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_memcached:init/1 pid: <0.23949.0> registered_name: 'ns_memcached-default' exception exit: {bad_return_value,{error,couldnt_connect_to_memcached}} in function gen_server:init_it/6 (gen_server.erl, line 325) ancestors: ['ns_memcached_sup-default','single_bucket_sup-default', <0.23201.0>] messages: [{'$gen_call',{<0.361.0>,#Ref<0.0.2.60316>},connected}, {'$gen_call',{<0.23984.0>, {#Ref<0.0.2.60842>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.23985.0>,#Ref<0.0.2.60868>},topkeys}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.60987>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.61441>},connected}, {'$gen_call',{<0.24025.0>, {#Ref<0.0.2.61525>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.61947>},connected}, {'$gen_call',{<0.24061.0>, {#Ref<0.0.2.62096>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.62511>},connected}, {'$gen_call',{<0.24099.0>, {#Ref<0.0.2.62758>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.63087>},connected}, {'$gen_call',{<0.24138.0>, {#Ref<0.0.2.63410>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.63548>},connected}, {'$gen_call',{<0.23982.0>,#Ref<0.0.2.63878>}, list_vbuckets_prevstate}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.64137>},connected}, {'$gen_call',{<0.24205.0>,#Ref<0.0.2.64556>},topkeys}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.64610>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.65178>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.65742>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.66288>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.66744>},connected}, {'$gen_call',{<0.24367.0>, {#Ref<0.0.2.67239>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.67391>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.67900>},connected}, {'$gen_call',{<0.24406.0>, {#Ref<0.0.2.67906>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.24432.0>,#Ref<0.0.2.68312>},topkeys}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.68388>},connected}, {'$gen_call',{<0.24445.0>, {#Ref<0.0.2.68493>,'ns_1@10.1.2.30'}}, connected}] links: [#Port<0.24318>,<0.23203.0>,#Port<0.24353>,#Port<0.24252>, #Port<0.24285>,#Port<0.24218>] dictionary: [] trap_exit: true status: running heap_size: 46368 stack_size: 24 reductions: 12172 neighbours: [error_logger:error] [2012-04-10 18:28:17] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'ns_memcached_sup-default'} Context: start_error Reason: {bad_return_value,{error,couldnt_connect_to_memcached}} Offender: [{pid,undefined}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:28:17] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR 
REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {{bad_return_value,{error,couldnt_connect_to_memcached}}, {gen_server,call,['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24399.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:17] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24463.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:18] [ns_1@10.1.2.30:<0.24345.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:18] [ns_1@10.1.2.30:<0.24430.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:19] [ns_1@10.1.2.30:<0.24448.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:19] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24453.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:19] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24477.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:19] [ns_1@10.1.2.30:<0.24439.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:20] [ns_1@10.1.2.30:<0.24357.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:20] [ns_1@10.1.2.30:<0.24446.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:21] [ns_1@10.1.2.30:<0.24460.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:21] [ns_1@10.1.2.30:<0.24450.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24477.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: 
[{pid,<0.24491.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:22] [ns_1@10.1.2.30:<0.24377.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:22] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:22] [ns_1@10.1.2.30:<0.24458.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:22] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24365.0>} [stats:error] [2012-04-10 18:28:23] [ns_1@10.1.2.30:<0.24478.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:23] [ns_1@10.1.2.30:<0.24467.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:23] [ns_1@10.1.2.30:<0.24365.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:24] [ns_1@10.1.2.30:<0.24387.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:24] [ns_1@10.1.2.30:<0.24474.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:25] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24491.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:25] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24515.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:25] [ns_1@10.1.2.30:<0.24488.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:25] [ns_1@10.1.2.30:<0.24480.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:26] [ns_1@10.1.2.30:<0.24400.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:26] [ns_1@10.1.2.30:<0.24486.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:27] [ns_1@10.1.2.30:<0.24505.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:27] [ns_1@10.1.2.30:<0.24492.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:28] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, 
{gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24515.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:28] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24533.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:28] [ns_1@10.1.2.30:<0.24415.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:28] [ns_1@10.1.2.30:<0.24503.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:29] [ns_1@10.1.2.30:<0.24518.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:29] [ns_1@10.1.2.30:<0.24507.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:30] [ns_1@10.1.2.30:<0.24425.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:30] [ns_1@10.1.2.30:<0.24516.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:31] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24533.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:31] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24551.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:31] [ns_1@10.1.2.30:<0.24528.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:31] [ns_1@10.1.2.30:<0.24520.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:32] [ns_1@10.1.2.30:<0.24441.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:32] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24365.0>} [stats:error] [2012-04-10 18:28:32] [ns_1@10.1.2.30:<0.24526.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:warn] [2012-04-10 18:28:32] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying. 
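
The recurring "Unable to connect: {error,{badmatch,{error,timeout}}}, retrying." warnings show the TCP connect from ns_memcached to the memcached port timing out, with the result surfacing through a failed pattern match. A sketch of how such a badmatch arises (hypothetical host, port, and timeout; not the actual ns_memcached:connect code):

    %% If the connect returns {error,timeout}, matching it against
    %% {ok, Sock} raises error:{badmatch,{error,timeout}}:
    {ok, Sock} = gen_tcp:connect("127.0.0.1", 11210,
                                 [binary, {active, false}], 5000).

ns_memcached evidently catches that error and retries, which is the loop logged here roughly every 16 seconds.
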
[stats:error] [2012-04-10 18:28:33] [ns_1@10.1.2.30:<0.24542.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:33] [ns_1@10.1.2.30:<0.24534.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:34] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24551.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:34] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24570.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:34] [ns_1@10.1.2.30:<0.24454.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:34] [ns_1@10.1.2.30:<0.24540.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:35] [ns_1@10.1.2.30:<0.24554.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:35] [ns_1@10.1.2.30:<0.24544.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:36] [ns_1@10.1.2.30:<0.24469.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:36] [ns_1@10.1.2.30:<0.24552.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:37] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24570.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:37] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24607.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:37] [ns_1@10.1.2.30:<0.24567.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:37] [ns_1@10.1.2.30:<0.24556.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:38] [ns_1@10.1.2.30:<0.24482.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:38] [ns_1@10.1.2.30:<0.24565.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] 
[stats:error] [2012-04-10 18:28:39] [ns_1@10.1.2.30:<0.24579.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:39] [ns_1@10.1.2.30:<0.24571.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:40] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24607.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:40] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24625.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:40] [ns_1@10.1.2.30:<0.24496.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:40] [ns_1@10.1.2.30:<0.24577.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:41] [ns_1@10.1.2.30:<0.24610.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:41] [ns_1@10.1.2.30:<0.24581.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:42] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:42] [ns_1@10.1.2.30:<0.24509.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:42] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24365.0>} [stats:error] [2012-04-10 18:28:42] [ns_1@10.1.2.30:<0.24608.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:43] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24625.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:43] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24647.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:43] [ns_1@10.1.2.30:<0.24622.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:43] [ns_1@10.1.2.30:<0.24614.0>:stats_reader:log_bad_responses:191] Some nodes 
didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:44] [ns_1@10.1.2.30:<0.24522.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:44] [ns_1@10.1.2.30:<0.24620.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:45] [ns_1@10.1.2.30:<0.24634.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:45] [ns_1@10.1.2.30:<0.24626.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:46] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24647.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:46] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24663.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:46] [ns_1@10.1.2.30:<0.24538.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:47] [ns_1@10.1.2.30:<0.24632.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:47] [ns_1@10.1.2.30:<0.24650.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:48] [ns_1@10.1.2.30:<0.24638.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:48] [ns_1@10.1.2.30:<0.24548.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:warn] [2012-04-10 18:28:48] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying. 
[stats:error] [2012-04-10 18:28:49] [ns_1@10.1.2.30:<0.24648.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:49] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24663.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:49] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24684.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:49] [ns_1@10.1.2.30:<0.24660.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:50] [ns_1@10.1.2.30:<0.24652.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:50] [ns_1@10.1.2.30:<0.24560.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:51] [ns_1@10.1.2.30:<0.24658.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:51] [ns_1@10.1.2.30:<0.24675.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:28:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24684.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24698.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:28:52] [ns_1@10.1.2.30:<0.24664.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:28:52] [ns_1@10.1.2.30:<0.24677.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:28:52] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24365.0>} [error_logger:error] [2012-04-10 18:28:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24463.0>}, {name,hot_keys_keeper}, 
{mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24705.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:28:55] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24698.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:55] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24711.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:28:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24711.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:28:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24718.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-04-10 18:28:59] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:218] Janitor run exited for bucket "default" with reason {timeout, {gen_server,call, [{'ns_memcached-default', 'ns_1@10.1.2.30'}, list_vbuckets_prevstate, 30000]}} [error_logger:error] [2012-04-10 18:28:59] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_janitor:cleanup/2 pid: <0.24365.0> registered_name: [] exception exit: {timeout, {gen_server,call, [{'ns_memcached-default','ns_1@10.1.2.30'}, list_vbuckets_prevstate,30000]}} in function gen_server:call/3 (gen_server.erl, line 188) in call from ns_janitor_map_recoverer:'-read_existing_map/4-lc$^0/1-0-'/2 (src/ns_janitor_map_recoverer.erl, line 40) in call from ns_janitor_map_recoverer:read_existing_map/4 (src/ns_janitor_map_recoverer.erl, line 39) in call from ns_janitor:do_cleanup/3 (src/ns_janitor.erl, line 46) ancestors: [<0.345.0>,mb_master_sup,mb_master,ns_server_sup, ns_server_cluster_sup,<0.59.0>] messages: [] links: [<0.345.0>] dictionary: [] trap_exit: false status: running heap_size: 75025 stack_size: 24 reductions: 7385 neighbours: [error_logger:error] [2012-04-10 18:29:01] 
[ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24718.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:01] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24724.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:29:02] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:04] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24724.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:04] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24736.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-04-10 18:29:04] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying. 
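
The janitor crash reported above is the other face of the same outage: ns_janitor issues a gen_server:call with an explicit 30000 ms timeout, and when 'ns_memcached-default' is alive but stuck (still trying to connect to memcached) the caller exits with {timeout, ...} rather than {noproc, ...}. The call shape, lifted from the reason term in the crash report (in a fresh shell this would exit with noproc instead, since the server would not be registered at all):

    %% Exits the caller with {timeout, {gen_server, call, Args}} when the
    %% server is registered but does not reply within 30 s:
    gen_server:call({'ns_memcached-default', 'ns_1@10.1.2.30'},
                    list_vbuckets_prevstate, 30000).
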
[error_logger:error] [2012-04-10 18:29:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24736.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24742.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:29:07] [ns_1@10.1.2.30:<0.24729.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24742.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24749.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:29:12] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24729.0>} [error_logger:error] [2012-04-10 18:29:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24749.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24758.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:29:13] [ns_1@10.1.2.30:<0.24729.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:16] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24758.0>}, 
{name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:16] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24763.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:29:19] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24763.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:19] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24772.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:info] [2012-04-10 18:29:19] [ns_1@10.1.2.30:<0.24729.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [ns_server:warn] [2012-04-10 18:29:21] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying. [error_logger:error] [2012-04-10 18:29:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24772.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24785.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-04-10 18:29:22] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:218] Janitor run exited for bucket "default" with reason shutdown [stats:error] [2012-04-10 18:29:22] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:24] [ns_1@10.1.2.30:<0.24642.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:24] [ns_1@10.1.2.30:<0.24656.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:25] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, 
[{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24785.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:25] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24806.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:29:25] [ns_1@10.1.2.30:<0.24656.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:27] [ns_1@10.1.2.30:<0.24642.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:29:27] [ns_1@10.1.2.30:<0.24792.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:27] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {timeout,{gen_server,call, ['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24705.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:27] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24813.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:29:28] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24806.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:28] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24815.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:29:28] [ns_1@10.1.2.30:<0.24575.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:30] [ns_1@10.1.2.30:<0.24585.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:31] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24815.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, 
{restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:31] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24832.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:29:31] [ns_1@10.1.2.30:<0.24575.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:29:32] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24792.0>} [ns_server:info] [2012-04-10 18:29:33] [ns_1@10.1.2.30:<0.24792.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:34] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24832.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:34] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24841.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:29:35] [ns_1@10.1.2.30:<0.24786.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:warn] [2012-04-10 18:29:35] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:218] Janitor run exited for bucket "default" with reason shutdown [ns_server:warn] [2012-04-10 18:29:37] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying. 
[error_logger:error] [2012-04-10 18:29:38] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================CRASH REPORT========================= crasher: initial call: ns_memcached:init/1 pid: <0.24462.0> registered_name: 'ns_memcached-default' exception exit: {bad_return_value,{error,couldnt_connect_to_memcached}} in function gen_server:init_it/6 (gen_server.erl, line 325) ancestors: ['ns_memcached_sup-default','single_bucket_sup-default', <0.23201.0>] messages: [{'$gen_call',{<0.361.0>,#Ref<0.0.2.68917>},connected}, {'$gen_call',{<0.24473.0>, {#Ref<0.0.2.69020>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.24500.0>,#Ref<0.0.2.69477>},topkeys}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.69499>},connected}, {'$gen_call',{<0.24513.0>, {#Ref<0.0.2.69672>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.69960>},connected}, {'$gen_call',{<0.24365.0>,#Ref<0.0.2.70143>}, list_vbuckets_prevstate}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.70548>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.71161>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.71731>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.72252>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.72772>},connected}, {'$gen_call',{<0.24714.0>,#Ref<0.0.2.72935>},topkeys}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.72962>},connected}, {'$gen_call',{<0.24731.0>, {#Ref<0.0.2.73244>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.73375>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.73596>},connected}, {'$gen_call',{<0.24747.0>, {#Ref<0.0.2.73627>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.73821>},connected}, {'$gen_call',{<0.24761.0>, {#Ref<0.0.2.73899>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.74127>},connected}, {'$gen_call',{<0.24775.0>, {#Ref<0.0.2.74243>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.24794.0>, {#Ref<0.0.2.74530>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.74577>},connected}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.75108>},connected}, {'$gen_call',{<0.24825.0>, {#Ref<0.0.2.75246>,'ns_1@10.1.2.30'}}, connected}, {'$gen_call',{<0.24836.0>,#Ref<0.0.2.75581>},topkeys}, {'$gen_call',{<0.361.0>,#Ref<0.0.2.75692>},connected}, {'$gen_call',{<0.24844.0>, {#Ref<0.0.2.75755>,'ns_1@10.1.2.30'}}, connected}] links: [#Port<0.24485>,<0.23203.0>,#Port<0.24551>,#Port<0.24422>, #Port<0.24476>,#Port<0.24387>] dictionary: [] trap_exit: true status: running heap_size: 46368 stack_size: 24 reductions: 12184 neighbours: [error_logger:error] [2012-04-10 18:29:38] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {{bad_return_value,{error,couldnt_connect_to_memcached}}, {gen_server,call,['ns_memcached-default',topkeys,30000]}} Offender: [{pid,<0.24813.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:error] [2012-04-10 18:29:38] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,'ns_memcached_sup-default'} Context: start_error Reason: {bad_return_value,{error,couldnt_connect_to_memcached}} Offender: [{pid,undefined}, {name,{ns_memcached,stats,"default"}}, {mfargs,{ns_memcached,start_link,[{"default",stats}]}}, {restart_type,permanent}, {shutdown,86400000}, {child_type,worker}] 
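
The {bad_return_value,{error,couldnt_connect_to_memcached}} exits in the crash reports are standard gen_server mechanics: Module:init/1 may only return {ok,State}, {ok,State,Timeout}, {stop,Reason}, or ignore, and any other term makes the process die with {bad_return_value, Term}, which the owning supervisor then logs as start_error, exactly as 'ns_memcached_sup-default' does above. A self-contained sketch of the same failure mode (hypothetical module, not ns_memcached source):

    %% bad_init_demo: init/1 breaks the gen_server contract on purpose.
    -module(bad_init_demo).
    -behaviour(gen_server).
    -export([start_link/0, init/1, handle_call/3, handle_cast/2,
             handle_info/2, terminate/2, code_change/3]).

    start_link() ->
        gen_server:start_link(?MODULE, [], []).

    init([]) ->
        %% Returning {error, ...} instead of {stop, ...} makes the process
        %% exit with {bad_return_value, {error, couldnt_connect}}; under a
        %% supervisor this surfaces as a start_error report.
        {error, couldnt_connect}.

    handle_call(_Request, _From, State) -> {reply, ok, State}.
    handle_cast(_Msg, State) -> {noreply, State}.
    handle_info(_Info, State) -> {noreply, State}.
    terminate(_Reason, _State) -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.
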
[error_logger:info] [2012-04-10 18:29:38] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24860.0>}, {name,hot_keys_keeper}, {mfargs,{hot_keys_keeper,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [ns_server:warn] [2012-04-10 18:29:40] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:idle:339] Nodes ['ns_1@10.1.2.30'] failed to delete bucket "default" within expected time. [ns_server:info] [2012-04-10 18:29:40] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:idle:345] Restarting moxi on nodes [] [menelaus:info] [2012-04-10 18:29:40] [ns_1@10.1.2.30:<0.24701.0>:menelaus_web_buckets:handle_bucket_delete:207] Deleted bucket "default" [menelaus:info] [2012-04-10 18:29:40] [ns_1@10.1.2.30:<0.24780.0>:menelaus_web_buckets:do_bucket_create:275] Created bucket "default" of type: membase [stats:error] [2012-04-10 18:29:41] [ns_1@10.1.2.30:<0.24781.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:29:41] [ns_1@10.1.2.30:<0.23026.0>:ns_port_server:log:166] moxi<0.23026.0>: 2012-04-10 18:29:42: (agent_config.c.705) ERROR: bad JSON configuration from http://127.0.0.1:8091/pools/default/saslBucketsStreaming: Number of buckets must be a power of two > 0 and <= MAX_BUCKETS ({ moxi<0.23026.0>: "name": "default", moxi<0.23026.0>: "nodeLocator": "vbucket", moxi<0.23026.0>: "saslPassword": "", moxi<0.23026.0>: "nodes": [{ moxi<0.23026.0>: "couchApiBase": "http://10.1.2.30:8092/default", moxi<0.23026.0>: "replication": 0, moxi<0.23026.0>: "clusterMembership": "active", moxi<0.23026.0>: "status": "warmup", moxi<0.23026.0>: "thisNode": true, moxi<0.23026.0>: "hostname": "10.1.2.30:8091", moxi<0.23026.0>: "clusterCompatibility": 1, moxi<0.23026.0>: "version": "2.0.0r-1065-rel-enterprise", moxi<0.23026.0>: "os": "x86_64-unknown-linux-gnu", moxi<0.23026.0>: "ports": { moxi<0.23026.0>: "proxy": 11211, moxi<0.23026.0>: "direct": 11210 moxi<0.23026.0>: } moxi<0.23026.0>: }], moxi<0.23026.0>: "vBucketServerMap": { moxi<0.23026.0>: "hashAlgorithm": "CRC", moxi<0.23026.0>: "numReplicas": 1, moxi<0.23026.0>: "serverList": ["10.1.2.30:11210"], moxi<0.23026.0>: "vBucketMap": [] moxi<0.23026.0>: } moxi<0.23026.0>: }) [stats:error] [2012-04-10 18:29:41] [ns_1@10.1.2.30:<0.24630.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:41] [ns_1@10.1.2.30:<0.24699.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:41] [ns_1@10.1.2.30:<0.3685.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:41] [ns_1@10.1.2.30:<0.24618.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:41] [ns_1@10.1.2.30:<0.24870.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:42] [ns_1@10.1.2.30:<0.24871.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:42] [ns_1@10.1.2.30:<0.24872.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:29:42] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24894.0>} 
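
The moxi rejection above ("Number of buckets must be a power of two > 0 and <= MAX_BUCKETS") is moxi validating vBucketServerMap: the just-recreated "default" bucket is still in warmup ("status": "warmup"), so ns_server publishes an empty "vBucketMap": [], and a vbucket count of zero fails the check quoted in the message. The predicate is the usual bit trick, restated here in Erlang for reference (moxi itself is C; this is an editor's restatement, not moxi source):

    %% A vbucket count is accepted only if it is a positive power of two:
    IsPow2 = fun(N) -> N > 0 andalso N band (N - 1) =:= 0 end.
    %% IsPow2(0)  -> false  (empty vBucketMap, as logged above)
    %% IsPow2(16) -> true

The error should clear on its own once the janitor finishes and a real vbucket map is published in the streamed configuration.
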
[stats:error] [2012-04-10 18:29:43] [ns_1@10.1.2.30:<0.24910.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:43] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24841.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:43] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24924.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:29:43] [ns_1@10.1.2.30:<0.24912.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:44] [ns_1@10.1.2.30:<0.24914.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:44] [ns_1@10.1.2.30:<0.24875.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:45] [ns_1@10.1.2.30:<0.24925.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:45] [ns_1@10.1.2.30:<0.24927.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [ns_server:info] [2012-04-10 18:29:45] [ns_1@10.1.2.30:<0.24894.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:46] [ns_1@10.1.2.30:<0.24929.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [error_logger:error] [2012-04-10 18:29:46] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================SUPERVISOR REPORT========================= Supervisor: {local,menelaus_sup} Context: child_terminated Reason: {noproc, {gen_server,call, [{'stats_reader-default','ns_1@10.1.2.30'}, {latest,minute,1}]}} Offender: [{pid,<0.24924.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [error_logger:info] [2012-04-10 18:29:46] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72] =========================PROGRESS REPORT========================= supervisor: {local,menelaus_sup} started: [{pid,<0.24940.0>}, {name,menelaus_web_alerts_srv}, {mfargs,{menelaus_web_alerts_srv,start_link,[]}}, {restart_type,permanent}, {shutdown,5000}, {child_type,worker}] [stats:error] [2012-04-10 18:29:46] [ns_1@10.1.2.30:<0.24876.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:47] [ns_1@10.1.2.30:<0.24935.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:47] [ns_1@10.1.2.30:<0.24937.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] [stats:error] [2012-04-10 18:29:48] [ns_1@10.1.2.30:<0.24941.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30'] 
[stats:error] [2012-04-10 18:29:48] [ns_1@10.1.2.30:<0.24877.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:29:49] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.24940.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:29:49] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.24960.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:29:49] [ns_1@10.1.2.30:<0.24948.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:49] [ns_1@10.1.2.30:<0.24953.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:50] [ns_1@10.1.2.30:<0.24955.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:50] [ns_1@10.1.2.30:<0.24878.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:51] [ns_1@10.1.2.30:<0.24963.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:51] [ns_1@10.1.2.30:<0.24965.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:29:51] [ns_1@10.1.2.30:<0.24894.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:29:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.24960.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:29:52] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.24976.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:29:52] [ns_1@10.1.2.30:<0.24969.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:29:52] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24894.0>}
[stats:error] [2012-04-10 18:29:52] [ns_1@10.1.2.30:<0.24879.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:warn] [2012-04-10 18:29:53] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying.
[stats:error] [2012-04-10 18:29:53] [ns_1@10.1.2.30:<0.24973.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:53] [ns_1@10.1.2.30:<0.24977.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:54] [ns_1@10.1.2.30:<0.24981.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:54] [ns_1@10.1.2.30:<0.24881.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:29:55] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.24976.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:29:55] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.24997.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:29:55] [ns_1@10.1.2.30:<0.24988.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:55] [ns_1@10.1.2.30:<0.24990.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:56] [ns_1@10.1.2.30:<0.24994.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:56] [ns_1@10.1.2.30:<0.24883.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:57] [ns_1@10.1.2.30:<0.25000.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:57] [ns_1@10.1.2.30:<0.25002.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:29:58] [ns_1@10.1.2.30:<0.24894.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:29:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.24997.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:29:58] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25015.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:29:58] [ns_1@10.1.2.30:<0.25006.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:58] [ns_1@10.1.2.30:<0.24886.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:59] [ns_1@10.1.2.30:<0.25010.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:29:59] [ns_1@10.1.2.30:<0.25016.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:00] [ns_1@10.1.2.30:<0.25020.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:00] [ns_1@10.1.2.30:<0.24887.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:01] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25015.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:01] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25034.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:01] [ns_1@10.1.2.30:<0.24699.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:01] [ns_1@10.1.2.30:<0.24630.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:01] [ns_1@10.1.2.30:<0.25025.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:01] [ns_1@10.1.2.30:<0.25027.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:02] [ns_1@10.1.2.30:<0.25031.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:30:02] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24894.0>}
[stats:error] [2012-04-10 18:30:02] [ns_1@10.1.2.30:<0.24888.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:03] [ns_1@10.1.2.30:<0.25041.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:03] [ns_1@10.1.2.30:<0.25043.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:30:04] [ns_1@10.1.2.30:<0.24894.0>:ns_janitor:wait_for_memcached:280] Waiting for "default" on ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:04] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25034.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:04] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25057.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:04] [ns_1@10.1.2.30:<0.25047.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:04] [ns_1@10.1.2.30:<0.24889.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:05] [ns_1@10.1.2.30:<0.25054.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:05] [ns_1@10.1.2.30:<0.25058.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:06] [ns_1@10.1.2.30:<0.25062.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:07] [ns_1@10.1.2.30:<0.24897.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25057.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:07] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25076.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:07] [ns_1@10.1.2.30:<0.25067.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:08] [ns_1@10.1.2.30:<0.25069.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:08] [ns_1@10.1.2.30:<0.24890.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:warn] [2012-04-10 18:30:09] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying.
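Each SUPERVISOR REPORT above is immediately followed by a PROGRESS REPORT with a fresh pid because the child is declared permanent: the supervisor restarts it unconditionally after every termination. A sketch of a child spec carrying exactly the {name, mfargs, restart_type, shutdown, child_type} fields printed in the reports (the wrapper module is hypothetical; this uses the classic six-tuple spec format, not necessarily ns_server's actual source):

%% alerts_child: child spec whose fields match those in the reports.
-module(alerts_child).
-export([child_spec/0]).

child_spec() ->
    {menelaus_web_alerts_srv,                   %% name
     {menelaus_web_alerts_srv, start_link, []}, %% mfargs
     permanent,                                 %% restart_type: always restart
     5000,                                      %% shutdown: 5000 ms to stop
     worker,                                    %% child_type
     [menelaus_web_alerts_srv]}.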
[stats:error] [2012-04-10 18:30:09] [ns_1@10.1.2.30:<0.24921.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:09] [ns_1@10.1.2.30:<0.25079.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:10] [ns_1@10.1.2.30:<0.25083.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25076.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:10] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25094.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:10] [ns_1@10.1.2.30:<0.25073.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:11] [ns_1@10.1.2.30:<0.24933.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:11] [ns_1@10.1.2.30:<0.25091.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:12] [ns_1@10.1.2.30:<0.25095.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:12] [ns_1@10.1.2.30:<0.25087.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:30:12] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24894.0>}
[error_logger:error] [2012-04-10 18:30:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {timeout,
                  {gen_server,call,
                   ['ns_memcached-default',topkeys,30000]}}
     Offender:   [{pid,<0.24860.0>},
                  {name,hot_keys_keeper},
                  {mfargs,{hot_keys_keeper,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25110.0>},
                 {name,hot_keys_keeper},
                 {mfargs,{hot_keys_keeper,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:13] [ns_1@10.1.2.30:<0.24946.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25094.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:13] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25116.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:13] [ns_1@10.1.2.30:<0.25103.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:14] [ns_1@10.1.2.30:<0.25105.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:14] [ns_1@10.1.2.30:<0.25099.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:15] [ns_1@10.1.2.30:<0.24961.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:15] [ns_1@10.1.2.30:<0.25119.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:16] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25116.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:16] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25130.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:16] [ns_1@10.1.2.30:<0.25121.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:16] [ns_1@10.1.2.30:<0.25113.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:17] [ns_1@10.1.2.30:<0.24986.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:17] [ns_1@10.1.2.30:<0.25131.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:18] [ns_1@10.1.2.30:<0.25135.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:18] [ns_1@10.1.2.30:<0.25125.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:19] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25130.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:19] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25152.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:19] [ns_1@10.1.2.30:<0.24998.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:19] [ns_1@10.1.2.30:<0.25145.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:20] [ns_1@10.1.2.30:<0.25149.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:20] [ns_1@10.1.2.30:<0.24971.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:21] [ns_1@10.1.2.30:<0.3688.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:21] [ns_1@10.1.2.30:<0.24585.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:21] [ns_1@10.1.2.30:<0.25023.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:21] [ns_1@10.1.2.30:<0.25157.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[error_logger:error] [2012-04-10 18:30:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25152.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:22] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25172.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:22] [ns_1@10.1.2.30:<0.25161.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:info] [2012-04-10 18:30:22] [ns_1@10.1.2.30:<0.345.0>:ns_orchestrator:handle_info:209] Skipping janitor in state janitor_running: {janitor_state, ["default"], <0.24894.0>}
[stats:error] [2012-04-10 18:30:22] [ns_1@10.1.2.30:<0.25137.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:23] [ns_1@10.1.2.30:<0.25039.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:23] [ns_1@10.1.2.30:<0.25173.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:24] [ns_1@10.1.2.30:<0.25177.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[stats:error] [2012-04-10 18:30:24] [ns_1@10.1.2.30:<0.25153.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
[ns_server:warn] [2012-04-10 18:30:25] [ns_1@10.1.2.30:'ns_memcached-default':ns_memcached:connect:697] Unable to connect: {error,{badmatch,{error,timeout}}}, retrying.
[error_logger:error] [2012-04-10 18:30:25] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================SUPERVISOR REPORT=========================
     Supervisor: {local,menelaus_sup}
     Context:    child_terminated
     Reason:     {noproc,
                  {gen_server,call,
                   [{'stats_reader-default','ns_1@10.1.2.30'},
                    {latest,minute,1}]}}
     Offender:   [{pid,<0.25172.0>},
                  {name,menelaus_web_alerts_srv},
                  {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                  {restart_type,permanent},
                  {shutdown,5000},
                  {child_type,worker}]
[error_logger:info] [2012-04-10 18:30:25] [ns_1@10.1.2.30:error_logger:ale_error_logger_handler:log_report:72]
=========================PROGRESS REPORT=========================
    supervisor: {local,menelaus_sup}
       started: [{pid,<0.25192.0>},
                 {name,menelaus_web_alerts_srv},
                 {mfargs,{menelaus_web_alerts_srv,start_link,[]}},
                 {restart_type,permanent},
                 {shutdown,5000},
                 {child_type,worker}]
[stats:error] [2012-04-10 18:30:25] [ns_1@10.1.2.30:<0.25052.0>:stats_reader:log_bad_responses:191] Some nodes didn't respond: ['ns_1@10.1.2.30']
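Besides the noproc loop, the hot_keys_keeper crash above shows the other standard gen_server:call failure: 'ns_memcached-default' is alive but stuck in its connect/retry loop, so a call with an explicit 30000 ms timeout exits the caller with {timeout,{gen_server,call,[...]}}. A minimal sketch of that call shape (the wrapper module and function are hypothetical):

%% topkeys_probe: a gen_server:call/3 that gets no reply within 30000 ms
%% exits with {timeout,{gen_server,call,
%% ['ns_memcached-default',topkeys,30000]}}, matching the report.
-module(topkeys_probe).
-export([topkeys/0]).

topkeys() ->
    gen_server:call('ns_memcached-default', topkeys, 30000).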