[Openstack] unexpected distribution of compute instances in queens

Zufar Dhiyaulhaq zufardhiyaulhaq at gmail.com
Mon Nov 26 10:45:33 UTC 2018


Hi,

I am deploying OpenStack with 3 compute nodes, but I am seeing an abnormal
distribution of instances: instances are only deployed on one specific
compute node and are not distributed among the other compute nodes.

This is my nova.conf from the compute node (a Jinja2-based template):

[DEFAULT]
osapi_compute_listen = {{ hostvars[inventory_hostname]['ansible_ens3f1'][
'ipv4']['address'] }}
metadata_listen = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4'][
'address'] }}
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{ controller1_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller2_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller3_ip_man }}:5672
my_ip = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address']
}}
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_api
[barbican]
[cache]
backend=oslo_cache.memcache_pool
enabled=true
memcache_servers={{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
[cells]
[cinder]
os_region_name = RegionOne
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://{{ vip }}:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://{{ vip }}:5000/v3
memcached_servers = {{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = {{ nova_pw }}
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://{{ vip }}:9696
auth_url = http://{{ vip }}:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = {{ neutron_pw }}
service_metadata_proxy = true
metadata_proxy_shared_secret = {{ metadata_secret }}
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://{{ vip }}:5000/v3
username = placement
password = {{ placement_pw }}
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
keymap=en-us
novncproxy_base_url = https://{{ vip }}:6080/vnc_auto.html
novncproxy_host = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4'][
'address'] }}
[workarounds]
[wsgi]
[xenserver]
[xvp]
[placement_database]
connection=mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_placement

What is the problem? I have checked the openstack-nova-scheduler on the
controller node, and it is running well with only this warning:

 nova-scheduler[19255]:
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332:
NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported

The result I want is for instances to be distributed across all compute nodes.
Thank you.

-- 

*Regards,*
*Zufar Dhiyaulhaq*
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.openstack.org/pipermail/openstack/attachments/20181126/00172bf5/attachment.html>


More information about the Openstack mailing list