Packstack is hung at "Testing if puppet apply is finished: 172.100.10.2_compute.pp"
========
[root@vm-d1 ~]# packstack --answer-file=new_answers.cfg2
Welcome to the Packstack setup utility
The installation log file is available at: /var/tmp/packstack/20170228-180109-zOO_TH/openstack-setup.log
Installing:
Clean Up [ DONE ]
.
.
.
.
Applying 172.100.10.1_controller.pp
172.100.10.1_controller.pp: [ DONE ]
Applying 172.100.10.1_network.pp
172.100.10.1_network.pp: [ DONE ]
Applying 172.100.10.2_compute.pp
Applying 172.100.10.3_compute.pp
Testing if puppet apply is finished: 172.100.10.2_compute.pp [ | ]
=========
On the first compute node:
=========
[root@vm-c1 ~]#
[root@vm-c1 ~]# ps -eaf |grep compute
root 14785 14784 0 18:26 ? 00:00:00 flock /var/tmp/packstack/8f8a0767d0654bedaf431a2561b1c023/ps.lock puppet apply --modulepath /var/tmp/packstack/8f8a0767d0654bedaf431a2561b1c023/modules /var/tmp/packstack/8f8a0767d0654bedaf431a2561b1c023/manifests/172.100.10.3_compute.pp
root 14786 14785 0 18:26 ? 00:00:21 /usr/bin/ruby /usr/bin/puppet apply --modulepath /var/tmp/packstack/8f8a0767d0654bedaf431a2561b1c023/modules /var/tmp/packstack/8f8a0767d0654bedaf431a2561b1c023/manifests/172.100.10.3_compute.pp
root 15381 14786 0 18:26 ? 00:00:01 /usr/bin/systemctl restart openstack-nova-compute
nova 17285 1 0 18:50 ? 00:00:10 /usr/bin/python2 /usr/bin/nova-compute
root 21721 22081 0 19:51 pts/0 00:00:00 grep --color=auto compute
[root@vm-c1 ~]#
[root@vm-c1 ~]#
[root@vm-c1 ~]#
[root@vm-c1 ~]# cat /proc/15381/stack
[<ffffffff81212b95>] poll_schedule_timeout+0x55/0xb0
[<ffffffff8121411d>] do_sys_poll+0x4cd/0x580
[<ffffffff81214523>] SyS_ppoll+0x1b3/0x1d0
[<ffffffff816964c9>] system_call_fastpath+0x16/0x1b
[<ffffffffffffffff>] 0xffffffffffffffff
[root@vm-c1 ~]#
[root@vm-c1 ~]# cat /proc/17285/stack
[<ffffffff81212b95>] poll_schedule_timeout+0x55/0xb0
[<ffffffff81213511>] do_select+0x6d1/0x7c0
[<ffffffff812137db>] core_sys_select+0x1db/0x300
[<ffffffff812139ba>] SyS_select+0xba/0x110
[<ffffffff816964c9>] system_call_fastpath+0x16/0x1b
[<ffffffffffffffff>] 0xffffffffffffffff
[root@vm-c1 ~]#
[root@vm-c1 ~]#
[root@vm-c1 ~]# systemctl status openstack-nova-compute
● openstack-nova-compute.service - OpenStack Nova Compute Server
Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled)
Active: activating (start) since Tue 2017-02-28 18:50:46 IST; 1h 9min ago
Main PID: 17285 (nova-compute)
CGroup: /system.slice/openstack-nova-compute.service
└─17285 /usr/bin/python2 /usr/bin/nova-compute
Feb 28 18:50:46 vm-c1.veritas.com systemd[1]: Starting OpenStack Nova Compute Server...
[root@vm-c1 ~]#
[root@vm-c1 ~]#
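Note: the unit is stuck in "activating (start)" because nova-compute (PID 17285) has started but never signalled readiness to systemd, so the `systemctl restart` spawned by puppet (PID 15381) blocks — which is exactly why puppet apply, and therefore packstack, appears hung. On RDO the nova units are typically Type=notify; a quick way to confirm that, and to see what start-up is waiting on (suggested checks, not part of the original session):

# is this a Type=notify unit?
systemctl cat openstack-nova-compute | grep -i ^Type
# last 50 journal lines for the stuck unit
journalctl -u openstack-nova-compute -n 50 --no-pager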
[root@vm-c1 ~]# cat /var/log/nova/nova-compute.log
.
.
.
2017-02-28 18:26:26.493 14221 INFO nova.compute.resource_tracker [req-af5f0cec-34fb-4bdd-b664-3ff4fcf76e7c - - - - -] Final resource view: name=vm-c1.veritas.com phys_ram=34815MB used_ram=512MB phys_disk=49GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[]
2017-02-28 18:26:39.296 15400 INFO os_vif [-] Loaded VIF plugins: ovs, linux_bridge
2017-02-28 18:26:39.297 15400 WARNING oslo_reports.guru_meditation_report [-] Guru meditation now registers SIGUSR1 and SIGUSR2 by default for backward compatibility. SIGUSR1 will no longer be registered in a future release, so please use SIGUSR2 to generate reports.
2017-02-28 18:26:39.339 15400 WARNING oslo_config.cfg [-] Option "use_neutron" from group "DEFAULT" is deprecated for removal. Its value may be silently ignored in the future.
2017-02-28 18:26:39.372 15400 INFO nova.virt.driver [req-c4a1d481-d3d7-4c92-b472-87f337f0c9b7 - - - - -] Loading compute driver 'libvirt.LibvirtDriver'
2017-02-28 18:26:39.561 15400 WARNING os_brick.initiator.connectors.remotefs [req-c4a1d481-d3d7-4c92-b472-87f337f0c9b7 - - - - -] Connection details not present. RemoteFsClient may not initialize properly.
2017-02-28 18:26:39.577 15400 ERROR oslo.messaging._drivers.impl_rabbit [req-c4a1d481-d3d7-4c92-b472-87f337f0c9b7 - - - - -] [eb1aaead-9c55-4baf-8e81-1b3e26040558] AMQP server on 172.100.10.1:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 1 seconds. Client port: None
2017-02-28 18:26:40.599 15400 ERROR oslo.messaging._drivers.impl_rabbit [req-c4a1d481-d3d7-4c92-b472-87f337f0c9b7 - - - - -] [eb1aaead-9c55-4baf-8e81-1b3e26040558] AMQP server on 172.100.10.1:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 2 seconds. Client port: None
2017-02-28 18:26:42.622 15400 ERROR oslo.messaging._drivers.impl_rabbit [req-c4a1d481-d3d7-4c92-b472-87f337f0c9b7 - - - - -] [eb1aaead-9c55-4baf-8e81-1b3e26040558] AMQP server on 172.100.10.1:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 4 seconds. Client port: None
2017-02-28 18:26:46.649 15400 ERROR oslo.messaging._drivers.impl_rabbit [req-c4a1d481-d3d7-4c92-b472-87f337f0c9b7 - - - - -] [eb1aaead-9c55-4baf-8e81-1b3e26040558] AMQP server on 172.100.10.1:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 6 seconds. Client port: None
.
.
.
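Note: Errno 113 (EHOSTUNREACH) against 172.100.10.1:5672 means the compute node is receiving ICMP "host unreachable" for the AMQP port. On CentOS/RHEL 7 this usually points to an iptables/firewalld REJECT rule (the default reject target uses icmp-host-prohibited) rather than RabbitMQ being down. A minimal reachability probe from vm-c1, using only bash built-ins (a suggested check):

# basic ICMP reachability to the controller
ping -c 3 172.100.10.1
# pure-bash TCP probe of the AMQP port (no nc dependency)
timeout 5 bash -c 'exec 3<>/dev/tcp/172.100.10.1/5672' && echo "5672 open" || echo "5672 unreachable"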
==============
On the controller node:
==============
[root@vm-d1 ~(keystone_admin)]# systemctl list-units|grep nova
openstack-nova-api.service loaded active running OpenStack Nova API Server
openstack-nova-cert.service loaded active running OpenStack Nova Cert Server
openstack-nova-conductor.service loaded active running OpenStack Nova Conductor Server
openstack-nova-consoleauth.service loaded active running OpenStack Nova VNC console auth Server
openstack-nova-novncproxy.service loaded active running OpenStack Nova NoVNC Proxy Server
openstack-nova-scheduler.service loaded active running OpenStack Nova Scheduler Server
[root@vm-d1 ~(keystone_admin)]#
[root@vm-d1 ~(keystone_admin)]# systemctl list-units|grep rabbit
rabbitmq-server.service loaded active running RabbitMQ broker
[root@vm-d1 ~(keystone_admin)]#
[root@vm-d1 ~(keystone_admin)]#
[root@vm-d1 ~(keystone_admin)]#
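Note: RabbitMQ itself is running on the controller, so the next thing to rule out is a local firewall rejecting inbound 5672. Suggested checks on vm-d1 (not part of the original session):

# is the broker actually listening on 5672?
ss -tlnp | grep 5672
# any REJECT rule ahead of an ACCEPT for 5672?
iptables -nL INPUT --line-numbers | grep -E '5672|REJECT'
firewall-cmd --list-ports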
[root@vm-d1 ~(keystone_admin)]# openstack compute service list
+----+------------------+-------------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+-------------------+----------+---------+-------+----------------------------+
| 7 | nova-cert | vm-d1.veritas.com | internal | enabled | up | 2017-02-28T14:26:30.000000 |
| 8 | nova-conductor | vm-d1.veritas.com | internal | enabled | up | 2017-02-28T14:26:33.000000 |
| 12 | nova-scheduler | vm-d1.veritas.com | internal | enabled | up | 2017-02-28T14:26:28.000000 |
| 13 | nova-consoleauth | vm-d1.veritas.com | internal | enabled | up | 2017-02-28T14:26:34.000000 |
| 14 | nova-compute | vm-c1.veritas.com | nova | enabled | down | 2017-02-28T12:56:23.000000 |
+----+------------------+-------------------+----------+---------+-------+----------------------------+
[root@vm-d1 ~(keystone_admin)]#
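Note: the "down" state only means the controller has seen no RPC heartbeat from nova-compute since 12:56 UTC — which is 18:26 IST, the moment the compute log above starts failing to reach AMQP (the API reports UTC; the node logs are IST). With report_interval=10 and service_down_time=60 configured below, the state should flip to "up" within seconds of the broker becoming reachable, e.g.:

# re-check the service state periodically after fixing connectivity
watch -n 10 'openstack compute service list'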
==============
nova.conf on compute node:
==============
[root@vm-c1 ~]# grep -v \# /etc/nova/nova.conf |sed '/^\s*$/d'
[DEFAULT]
instance_usage_audit_period=hour
rootwrap_config=/etc/nova/rootwrap.conf
compute_driver=libvirt.LibvirtDriver
allow_resize_to_same_host=False
vif_plugging_is_fatal=True
vif_plugging_timeout=300
force_raw_images=True
reserved_host_memory_mb=512
cpu_allocation_ratio=16.0
ram_allocation_ratio=1.5
instance_usage_audit=True
heal_instance_info_cache_interval=60
force_snat_range=0.0.0.0/0
metadata_host=172.100.10.1
dhcp_domain=novalocal
use_neutron=True
firewall_driver=nova.virt.firewall.NoopFirewallDriver
state_path=/var/lib/nova
report_interval=10
service_down_time=60
debug=False
log_dir=/var/log/nova
transport_url=rabbit://guest:guest@172.100.10.1:5672/
rpc_backend=rabbit
image_service=nova.image.glance.GlanceImageService
notify_api_faults=False
notify_on_state_change=vm_and_task_state
volume_api_class=nova.volume.cinder.API
compute_manager=nova.compute.manager.ComputeManager
[api]
auth_strategy=keystone
[api_database]
connection=mysql+pymysql://nova_api:mypassword@172.100.10.1/nova_api
[barbican]
[cache]
[cells]
[cinder]
catalog_info=volumev2:cinderv2:publicURL
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
connection=mysql+pymysql://nova:mypassword@172.100.10.1/nova
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers=172.100.10.1:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
[libvirt]
virt_type=qemu
inject_password=False
inject_key=False
inject_partition=-1
live_migration_uri=qemu+tcp://nova@%s/system
cpu_mode=none
vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url=http://172.100.10.1:9696
region_name=RegionOne
ovs_bridge=br-int
extension_sync_interval=600
timeout=60
auth_type=v3password
auth_url=http://172.100.10.1:35357/v3
project_name=services
project_domain_name=Default
username=neutron
user_domain_name=Default
password=mypassword
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
driver=messagingv2
[oslo_messaging_rabbit]
rabbit_use_ssl=False
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name=RegionOne
auth_type=password
auth_url=http://172.100.10.1:5000/v3
project_name=services
project_domain_name=Default
username=placement
user_domain_name=Default
password=mypassword
[placement_database]
connection=mysql+pymysql://nova_placement:mypassword@172.100.10.1/nova_placement
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled=True
keymap=en-us
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=vm-c1.veritas.com
novncproxy_base_url=http://172.100.10.1:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
[root@vm-c1 ~]#
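Note: transport_url on the compute node (rabbit://guest:guest@172.100.10.1:5672/) matches the controller's, so the two configs agree. If the network path turns out to be fine, the broker-side credentials can be verified on vm-d1 (suggested rabbitmqctl commands; note that stock RabbitMQ 3.3+ restricts the guest user to localhost via loopback_users, although packstack normally relaxes this):

rabbitmqctl list_users
rabbitmqctl authenticate_user guest guest
rabbitmqctl list_permissions -p /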
============
nova.conf on controller:
============
[root@vm-d1 ~(keystone_admin)]# grep -v \# /etc/nova/nova.conf |sed '/^\s*$/d'
[DEFAULT]
rootwrap_config=/etc/nova/rootwrap.conf
allow_resize_to_same_host=False
vif_plugging_is_fatal=True
vif_plugging_timeout=300
cpu_allocation_ratio=16.0
ram_allocation_ratio=1.5
default_floating_pool=public
force_snat_range=0.0.0.0/0
metadata_host=172.100.10.1
dhcp_domain=novalocal
use_neutron=True
firewall_driver=nova.virt.firewall.NoopFirewallDriver
state_path=/var/lib/nova
report_interval=10
service_down_time=60
enabled_apis=osapi_compute,metadata
osapi_compute_listen=0.0.0.0
osapi_compute_listen_port=8774
osapi_compute_workers=8
metadata_listen=0.0.0.0
metadata_listen_port=8775
metadata_workers=8
debug=False
log_dir=/var/log/nova
transport_url=rabbit://guest:guest@172.100.10.1:5672/
rpc_backend=rabbit
image_service=nova.image.glance.GlanceImageService
notify_api_faults=False
notify_on_state_change=vm_and_task_state
osapi_volume_listen=0.0.0.0
[api]
auth_strategy=keystone
use_forwarded_for=False
fping_path=/usr/sbin/fping
[api_database]
connection=mysql+pymysql://nova_api:mypassword@172.100.10.1/nova_api
[barbican]
[cache]
[cells]
[cinder]
catalog_info=volumev2:cinderv2:publicURL
[cloudpipe]
[conductor]
workers=8
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
connection=mysql+pymysql://nova:mypassword@172.100.10.1/nova
[ephemeral_storage_encryption]
[filter_scheduler]
host_subset_size=1
max_io_ops_per_host=8
max_instances_per_host=50
available_filters=nova.scheduler.filters.all_filters
enabled_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,CoreFilter
use_baremetal_filters=False
weight_classes=nova.scheduler.weights.all_weighers
[glance]
api_servers=172.100.10.1:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri=http://172.100.10.1:5000/
auth_type=password
username=nova
project_name=services
password=mypassword
auth_url=http://172.100.10.1:35357
[libvirt]
vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url=http://172.100.10.1:9696
region_name=RegionOne
ovs_bridge=br-int
extension_sync_interval=600
service_metadata_proxy=True
metadata_proxy_shared_secret=mypassword
timeout=60
auth_type=v3password
auth_url=http://172.100.10.1:35357/v3
project_name=services
project_domain_name=Default
username=neutron
user_domain_name=Default
password=mypassword
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
driver=messagingv2
[oslo_messaging_rabbit]
rabbit_use_ssl=False
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
policy_file=/etc/nova/policy.json
[pci]
[placement]
os_region_name=RegionOne
auth_type=password
auth_url=http://172.100.10.1:5000/
project_name=services
project_domain_name=Default
username=placement
user_domain_name=Default
password=mypassword
[placement_database]
connection=mysql+pymysql://nova_placement:mypassword@172.100.10.1/nova_placement
[quota]
[rdp]
[remote_debug]
[scheduler]
host_manager=host_manager
driver=filter_scheduler
max_attempts=3
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
novncproxy_base_url=http://0.0.0.0:6080/vnc_auto.html
novncproxy_host=0.0.0.0
novncproxy_port=6080
[workarounds]
[wsgi]
api_paste_config=api-paste.ini
[xenserver]
[xvp]
[root@vm-d1 ~(keystone_admin)]#
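Note: nothing in either nova.conf explains the hang — both sides agree on rabbit://guest:guest@172.100.10.1:5672/. Given the EHOSTUNREACH pattern, the likely fix is opening the AMQP port in the controller's firewall; a minimal firewalld sketch (zone left at the default; adjust as needed):

# on vm-d1: permanently open 5672/tcp and reload
firewall-cmd --permanent --add-port=5672/tcp
firewall-cmd --reload

Once nova-compute connects, it notifies systemd, the blocked `systemctl restart` returns, puppet apply completes, and packstack should move past "Testing if puppet apply is finished".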