[Openstack-operators] accessing vm metadata fails with grizzly

Shyam Goud shyam.todeti at oneconvergence.com
Mon Jun 10 11:46:41 UTC 2013


All,

After a recent standard Grizzly install, I am seeing this issue:
the VM is unable to retrieve its metadata.

    cloud-init start running: Mon, 10 Jun 2013 10:01:35 +0000. up 4.05 seconds
    2013-06-10 10:01:35,726 - util.py[WARNING]: 'http://169.254.169.254/2009-04-04/meta-data/instance-id' failed [50/120s]: socket timeout [timed out]
    2013-06-10 10:02:26,782 - util.py[WARNING]: 'http://169.254.169.254/2009-04-04/meta-data/instance-id' failed [101/120s]: socket timeout [timed out]
    2013-06-10 10:02:44,803 - util.py[WARNING]: 'http://169.254.169.254/2009-04-04/meta-data/instance-id' failed [119/120s]: socket timeout [timed out]
    2013-06-10 10:02:45,806 - DataSourceEc2.py[CRITICAL]: giving up on md after 120 seconds

    no instance data found in start
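
To take cloud-init out of the picture, the same request can be issued by
hand from inside the guest (a sketch, assuming the image ships curl):

    # run inside the VM; this is the same URL cloud-init's EC2 datasource polls
    curl -sv http://169.254.169.254/2009-04-04/meta-data/instance-id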

From the router namespace, I am able to reach the controller, and all the
iptables rules seem to be fine.

    root@server14:/etc/init.d# ip netns exec qrouter-71a89bc2-d2b5-45e4-b87f-1186e3665732 iptables-save | grep 169.254.169.254
    -A quantum-l3-agent-PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8775
    root@server14:/etc/init.d#
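
To see whether guest traffic is actually hitting that rule, the packet
counters can be watched while the VM retries (a sketch; counters stuck at
zero would point at traffic never reaching the router):

    # -v prints per-rule packet/byte counters for the NAT PREROUTING chain
    ip netns exec qrouter-71a89bc2-d2b5-45e4-b87f-1186e3665732 \
        iptables -t nat -L quantum-l3-agent-PREROUTING -n -v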

    root@server14:/etc/init.d# ip netns exec qrouter-71a89bc2-d2b5-45e4-b87f-1186e3665732 netstat -anp
    Active Internet connections (servers and established)
    Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
    tcp        0      0 0.0.0.0:8775            0.0.0.0:*               LISTEN      5519/python
    Active UNIX domain sockets (servers and established)
    Proto RefCnt Flags       Type       State         I-Node   PID/Program name    Path
    root@server14:/etc/init.d#
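
The listener on 8775 should be the quantum-ns-metadata-proxy that the L3
agent spawns per router. Two checks that seem worth running (sketches; PID
5519 is taken from the netstat output above):

    # confirm which process owns the 8775 listener
    ps -fp 5519

    # talk to the proxy directly inside the namespace, bypassing the REDIRECT rule
    ip netns exec qrouter-71a89bc2-d2b5-45e4-b87f-1186e3665732 \
        curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:8775/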

    root@server14:/etc/init.d# ip netns exec qrouter-71a89bc2-d2b5-45e4-b87f-1186e3665732 ping 10.2.113.12
    PING 10.2.113.12 (10.2.113.12) 56(84) bytes of data.
    64 bytes from 10.2.113.12: icmp_req=1 ttl=64 time=0.299 ms
    64 bytes from 10.2.113.12: icmp_req=2 ttl=64 time=0.064 ms
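
Ping only proves ICMP reachability; the metadata agent talks to nova over
TCP 8775 from the network node itself, so a direct HTTP check from the host
(outside the namespace) might be more telling (a sketch):

    # an HTTP response, even an error, means nova-api's metadata service is
    # reachable; a hang here would mean the TCP path to 8775 is the problem
    curl -sv -o /dev/null http://10.2.113.12:8775/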

nova.conf content:

    root@server14:/etc/init.d# cat /etc/nova/nova.conf
    [DEFAULT]
    logdir=/var/log/nova
    state_path=/var/lib/nova
    lock_path=/run/lock/nova
    verbose=True
    api_paste_config=/etc/nova/api-paste.ini
    compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
    s3_host=10.2.113.12
    ec2_host=10.2.113.12
    ec2_dmz_host=10.2.113.12
    rabbit_host=10.2.113.12
    nova_url=http://10.2.113.12:8774/v1.1/
    sql_connection=mysql://novaUser:novaPass@10.2.113.12/nova
    root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf

    # Auth
    use_deprecated_auth=false
    auth_strategy=keystone

    # Imaging service
    glance_api_servers=10.2.113.12:9292
    image_service=nova.image.glance.GlanceImageService

    # Vnc configuration
    novnc_enabled=true
    novncproxy_base_url=http://10.2.113.12:6080/vnc_auto.html
    novncproxy_port=6080
    vncserver_proxyclient_address=10.2.113.12
    vncserver_listen=0.0.0.0

    # Network settings
    network_api_class=nova.network.quantumv2.api.API
    quantum_url=http://10.2.113.12:9696
    quantum_auth_strategy=keystone
    quantum_admin_tenant_name=service
    quantum_admin_username=quantum
    quantum_admin_password=service_pass
    quantum_admin_auth_url=http://10.2.113.12:35357/v2.0
    libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
    linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
    firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver

    #Metadata
    service_quantum_metadata_proxy = True
    quantum_metadata_proxy_shared_secret = helloOpenStack
    metadata_host = 10.2.113.12
    metadata_listen = 0.0.0.0
    #metadata_listen_port = 8775

    # Compute #
    compute_driver=libvirt.LibvirtDriver

    # Cinder #
    volume_api_class=nova.volume.cinder.API
    osapi_volume_listen_port=5900

    root@server14:/etc/init.d#
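
As I understand the Grizzly metadata path, with service_quantum_metadata_proxy
enabled the quantum metadata agent adds an X-Instance-ID-Signature header to
each proxied request, computed as an HMAC-SHA256 of the instance UUID keyed
with the shared secret, and nova-api rejects requests whose signature does not
match. That leg can be exercised against nova directly (a sketch;
<instance-uuid> and <tenant-id> are placeholders to fill in from 'nova list'):

    # compute the signature the metadata agent would send (Python 2 one-liner)
    python -c "import hmac, hashlib; print hmac.new('helloOpenStack', '<instance-uuid>', hashlib.sha256).hexdigest()"

    # replay a signed request against nova-api's metadata service
    curl -H 'X-Instance-ID: <instance-uuid>' \
         -H 'X-Tenant-ID: <tenant-id>' \
         -H 'X-Instance-ID-Signature: <signature-from-above>' \
         http://10.2.113.12:8775/2009-04-04/meta-data/instance-id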

Quantum l3_agent.ini content:

    root@server14:/etc/init.d# cat /etc/quantum/l3_agent.ini
    [DEFAULT]
    # Show debugging output in log (sets DEBUG log level output)
    # debug = True

    # L3 requires that an interface driver be set.  Choose the one that best
    # matches your plugin.

    # OVS based plugins (OVS, Ryu, NEC) that supports L3 agent
    interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
    # OVS based plugins(Ryu, NEC) that use OVS
    # as OpenFlow switch and check port status
    #ovs_use_veth = True
    # LinuxBridge
    #interface_driver = quantum.agent.linux.interface.BridgeInterfaceDriver

    auth_url = http://10.2.113.12:35357/v2.0
    auth_region = RegionOne
    admin_tenant_name = service
    admin_user = quantum
    admin_password = service_pass
    metadata_ip = 10.2.113.12
    metadata_port = 8775

    # router_id =
    root_helper = sudo /usr/bin/quantum-rootwrap /etc/quantum/rootwrap.conf
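
For completeness, the L3 agent and metadata agent logs can be checked for
errors around the proxy spawn (log paths assume the Ubuntu/Debian packaging):

    tail -n 50 /var/log/quantum/l3-agent.log
    tail -n 50 /var/log/quantum/metadata-agent.log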

Quantum metadata_agent.ini content:

    root@server14:/etc/init.d# cat /etc/quantum/metadata_agent.ini
    [DEFAULT]
    # The Quantum user information for accessing the Quantum API.
    auth_url = http://10.2.113.12:35357/v2.0
    auth_region = RegionOne
    admin_tenant_name = service
    admin_user = quantum
    admin_password = service_pass

    # IP address used by Nova metadata server
    nova_metadata_ip = 10.2.113.12

    # TCP Port used by Nova metadata server
    nova_metadata_port = 8775

    metadata_proxy_shared_secret = helloOpenStack
    root@server14:/etc/init.d#
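
One more leg of the chain: the ns-metadata-proxy forwards requests to this
metadata agent over a Unix domain socket, which with the default state_path
should live at /var/lib/quantum/metadata_proxy (an assumption based on the
Grizzly defaults):

    # the socket should exist if the metadata agent is running
    ls -l /var/lib/quantum/metadata_proxy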

Please let me know if I am missing something.

Thanks,
Shyam.
