Press left
and right
to change sections. Press up
and down
to move within sections. Press ?
anytime for help.
>>> from jnpr.junos import Device
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>>
>>> junos = Device(host='127.0.0.1', user='vagrant', port=12203)
>>> junos.open()
Device(127.0.0.1)
>>> junos_facts = junos.facts
>>> pp.pprint(junos_facts)
{ '2RE': False,
'HOME': '/cf/var/home/vagrant',
'RE0': { 'last_reboot_reason': 'Router rebooted after a normal shutdown.',
'model': 'FIREFLY-PERIMETER RE',
'status': 'Testing',
'up_time': '3 minutes, 36 seconds'},
'domain': None,
'fqdn': 'vsrx',
'hostname': 'vsrx',
'model': 'FIREFLY-PERIMETER',
'personality': 'SRX_BRANCH',
'serialnumber': '5b2b599a283b',
'srx_cluster': False,
'version': '12.1X47-D20.7',
'version_info': junos.version_info(major=(12, 1), type=X, minor=(47, 'D', 20), build=7),
'virtual': True}
>>> junos.close()
>>> import pyeapi
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>> connection = pyeapi.client.connect(transport='https', host='127.0.0.1', username='vagrant',
... password='vagrant', port=12443,)
>>> eos = pyeapi.client.Node(connection)
>>> eos_facts = eos.run_commands(['show version'])
>>> pp.pprint(eos_facts)
[ { u'architecture': u'i386',
u'bootupTimestamp': 1475859369.15,
u'hardwareRevision': u'',
u'internalBuildId': u'e796e94c-ba3b-4355-afcf-ef0abfbfaee3',
u'internalVersion': u'4.16.6M-3205780.4166M',
u'isIntlVersion': False,
u'memFree': 56204,
u'memTotal': 1897596,
u'modelName': u'vEOS',
u'serialNumber': u'',
u'systemMacAddress': u'08:00:27:52:27:ce',
u'version': u'4.16.6M'}]
>>> from jnpr.junos import Device
>>> from jnpr.junos.utils.config import Config
>>>
>>> junos = Device(host='127.0.0.1', user='vagrant', port=12203)
>>> junos.open()
Device(127.0.0.1)
>>>
>>> print(junos.facts['hostname'])
vsrx
>>> junos.bind(cu=Config)
>>> junos.cu.lock()
True
>>> junos.cu.load("system {host-name new-hostname;}", format="text", merge=True)
>>> junos.cu.commit()
True
>>> junos.cu.unlock()
True
>>> junos.facts_refresh()
>>> print(junos.facts['hostname'])
new-hostname
>>> junos.close()
>>> import pyeapi
>>> connection = pyeapi.client.connect(
... transport='https',
... host='127.0.0.1',
... username='vagrant',
... password='vagrant',
... port=12443,
... )
>>> eos = pyeapi.client.Node(connection)
>>> print(eos.run_commands(['show hostname'])[0]['hostname'])
localhost
>>> eos.run_commands(['configure', 'hostname a-new-hostname'])
[{}, {}]
>>> print(eos.run_commands(['show hostname'])[0]['hostname'])
a-new-hostname
>>> from napalm_base import get_network_driver
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>>
>>> junos_driver = get_network_driver('junos')
>>> eos_driver = get_network_driver('eos')
>>>
>>> junos_configuration = {
... 'hostname': '127.0.0.1',
... 'username': 'vagrant',
... 'password': '',
... 'optional_args': {'port': 12203}
... }
>>>
>>> eos_configuration = {
... 'hostname': '127.0.0.1',
... 'username': 'vagrant',
... 'password': 'vagrant',
... 'optional_args': {'port': 12443}
... }
>>>
>>> with junos_driver(**junos_configuration) as junos:
... pp.pprint(junos.get_facts())
...
{ 'fqdn': u'new-hostname',
'hostname': u'new-hostname',
'interface_list': [ 'ge-0/0/0', 'gr-0/0/0',
'lt-0/0/0', 'mt-0/0/0', 'vlan'],
'model': u'FIREFLY-PERIMETER',
'os_version': u'12.1X47-D20.7',
'serial_number': u'5b2b599a283b',
'uptime': 1080,
'vendor': u'Juniper'}
>>>
>>> with eos_driver(**eos_configuration) as eos:
... pp.pprint(eos.get_facts())
...
{ 'fqdn': u'a-new-hostname',
'hostname': u'a-new-hostname',
'interface_list': [u'Ethernet1', u'Ethernet2',
u'Management1'],
'model': u'vEOS',
'os_version': u'4.16.6M-3205780.4166M',
'serial_number': u'',
'uptime': 1217,
'vendor': u'Arista'}
>>> from napalm_base import get_network_driver
>>> junos_driver = get_network_driver('junos')
>>> eos_driver = get_network_driver('eos')
>>>
>>> junos_configuration = {
... 'hostname': '127.0.0.1',
... 'username': 'vagrant',
... 'password': '',
... 'optional_args': {'port': 12203}
... }
>>>
>>> eos_configuration = {
... 'hostname': '127.0.0.1',
... 'username': 'vagrant',
... 'password': 'vagrant',
... 'optional_args': {'port': 12443}
... }
>>>
>>> def change_configuration(device, configuration):
... device.load_merge_candidate(config=configuration)
... print(device.compare_config())
... device.commit_config()
...
>>>
>>> with junos_driver(**junos_configuration) as junos:
... change_configuration(
... junos,
... "system {host-name yet-another-hostname;}"
... )
...
[edit system]
- host-name new-hostname;
+ host-name yet-another-hostname;
>>>
>>> with junos_driver(**junos_configuration) as junos:
... change_configuration(
... junos,
... "system {host-name yet-another-hostname;}"
... )
...
>>>
>>> with eos_driver(**eos_configuration) as eos:
... change_configuration(
... eos,
... 'hostname yet-another-hostname'
... )
...
@@ -8,7 +8,7 @@
!
transceiver qsfp default-mode 4x10G
!
-hostname a-new-hostname
+hostname yet-another-hostname
!
spanning-tree mode mstp
!
>>>
>>> with eos_driver(**eos_configuration) as eos:
... change_configuration(
... eos,
... 'hostname yet-another-hostname'
... )
...
>>>
Changes are idempotent.
Use Ansible
and Jinja2
templates to abstract configuration syntax and focus on the parameters to change.
---
- name: "Get facts"
hosts: all
connection: local
gather_facts: no
vars:
tasks:
- name: "get facts from device"
napalm_get_facts:
hostname: "{{ host }}"
username: "{{ user }}"
dev_os: "{{ os }}"
password: "{{ password }}"
optional_args:
port: "{{ port }}"
filter: ['facts']
register: napalm_facts
- name: Facts
debug:
msg: "{{ napalm_facts.ansible_facts.facts|to_nice_json }}"
tags: [print_action]
➜ cat hosts
[all]
rtr00 os=eos host=127.0.0.1 user=vagrant password=vagrant port=12443
rtr01 os=junos host=127.0.0.1 user=vagrant password="" port=12203
➜ cat group_vars/all.yml
---
ansible_python_interpreter: "/usr/bin/env python"
domain: acme.com
➜ cat host_vars/rtr00.yml
---
hostname: thehostnama
➜ cat host_vars/rtr01.yml
---
hostname: another-host
➜ cat templates/eos/simple.j2
hostname {{ hostname }}
ip domain-name {{ domain }}
➜ cat templates/junos/simple.j2
system {
host-name {{ hostname }};
domain-name {{ domain }};
}
---
tasks:
- name: A simple template with some configuration
template:
src: "{{ os }}/simple.j2"
dest: "{{ host_dir }}/simple.conf"
changed_when: False
when: commit_changes == 0
- name: Load configuration into the device
napalm_install_config:
hostname: "{{ host }}"
username: "{{ user }}"
dev_os: "{{ os }}"
password: "{{ password }}"
optional_args:
port: "{{ port }}"
config_file: "{{ host_dir }}/simple.conf"
commit_changes: "{{ commit_changes }}"
replace_config: false
get_diffs: True
diff_file: "{{ host_dir }}/diff"
tags: [print_action]
YAML and
Jinja2
templates to translate variables into device configuration; NAPALM
to seamlessly deploy the resulting configuration and to gather information.
➜ cat host_vars/rtr00.yml
---
hostname: rtr00
asn: 65000
router_id: "1.1.1.100"
interfaces:
- name: "lo0"
ip_address: "2001:db8:b33f::100/128"
- name: "et1"
ip_address: "2001:db8:caf3:1::/127"
- name: "et2"
ip_address: "2001:db8:caf3:2::/127"
peers:
- ip: "2001:db8:caf3:1::1"
asn: 65001
- ip: "2001:db8:caf3:2::1"
asn: 65001
➜ cat host_vars/rtr01.yml
---
hostname: rtr01
asn: 65001
router_id: "1.1.1.101"
interfaces:
- name: "lo0"
ip_address: "2001:db8:b33f::101/128"
- name: "ge-0/0/1"
ip_address: "2001:db8:caf3:1::1/127"
- name: "ge-0/0/2"
ip_address: "2001:db8:caf3:2::1/127"
peers:
- ip: "2001:db8:caf3:1::"
asn: 65000
- ip: "2001:db8:caf3:2::"
asn: 65000
➜ cat hosts
[all]
rtr00 os=eos host=127.0.0.1 user=vagrant password=vagrant port=12443
rtr01 os=junos host=127.0.0.1 user=vagrant password="" port=12203
➜ cat (...)/templates/eos/ipfabric.j2
ipv6 unicast-routing
{% for interface in interfaces %}
default interface {{ interface.name }}
interface {{ interface.name }}
{{ 'no switchport' if not interface.name.startswith("lo") else "" }}
ipv6 address {{ interface.ip_address }}
{% endfor %}
route-map EXPORT-LO0 permit 10
match interface Loopback0
no router bgp
router bgp {{ asn }}
router-id {{ router_id }}
redistribute connected route-map EXPORT-LO0
{% for peer in peers %}
neighbor {{ peer.ip }} remote-as {{ peer.asn }}
address-family ipv6
neighbor {{ peer.ip }} activate
{% endfor %}
➜ cat (...)/templates/junos/ipfabric.j2
{% for interface in interfaces %}
interfaces {
replace:
{{ interface.name }} {
unit 0 {
family inet6 {
address {{ interface.ip_address }};
}
}
}
}
{% endfor %}
routing-options {
router-id {{ router_id }};
autonomous-system {{ asn }};
}
policy-options {
policy-statement EXPORT_LO0 {
from interface lo0.0;
then accept;
}
policy-statement PERMIT_ALL {
from protocol bgp;
then accept;
}
}
protocols {
replace:
bgp {
import PERMIT_ALL;
export [ EXPORT_LO0 PERMIT_ALL ];
}
}
{% for peer in peers %}
protocols {
bgp {
group peers {
neighbor {{ peer.ip }} {
peer-as {{ peer.asn }};
}
}
}
}
{% endfor %}
...
- name: Basic Configuration
hosts: all
connection: local
roles:
- base
- name: Fabric Configuration
hosts: all
connection: local
roles:
- ipfabric
...
➜ tree roles
├── base
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ ├── eos
│ │ └── simple.j2
│ └── junos
│ └── simple.j2
└── ipfabric
├── tasks
│ └── main.yml
└── templates
├── eos
│ └── ipfabric.j2
└── junos
└── ipfabric.j2
- name: "get facts from device"
napalm_get_facts:
hostname: "{{ host }}"
username: "{{ user }}"
dev_os: "{{ os }}"
password: "{{ password }}"
optional_args:
port: "{{ port }}"
filter: ['bgp_neighbors']
register: napalm_facts
- name: "Check BGP sessions are healthy"
assert:
that:
- item.value.is_up
msg: "{{ item.key }} is down"
with_dict: "{{ napalm_facts.ansible_facts.bgp_neighbors.global.peers }}"
- name: "Check BGP sessions are receiving prefixes"
assert:
that:
- item.value.address_family.ipv6.received_prefixes > 0
msg: "{{ item.key }} is not receiving any prefixes"
with_dict: "{{ napalm_facts.ansible_facts.bgp_neighbors.global.peers }}"
~/.pynsotrc
[pynsot]
url = http://localhost:8990/api
secret_key = 3AvRKqff0uuFT0d44ecqHIman31HPAcQ_nTLcF0m_uA= # Token
auth_method = auth_token
email = admin@acme.com
Make sure you replace the secret_key
with the token you gathered in the previous step.
{% for iface, iface_data in ifaces.items() if iface_data.attributes.link_type == "fabric" %}
{% set peer = iface_data.attributes.connects_to_device %}
{% set peer_iface = iface_data.attributes.connects_to_iface %}
{% set peer_ip = hostvars[peer]['interfaces'][peer_iface]['ipv6'][0].split('/')[0] %}
{% set peer_asn = hostvars[peer]['asn'] %}
neighbor {{ peer_ip }} remote-as {{ peer_asn }}
neighbor {{ peer_ip }} description {{ peer }}:{{ peer_iface }}
address-family ipv6
neighbor {{ peer_ip }} activate
{% endfor %}
- name: "Get facts from device"
napalm_get_facts:
hostname: "{{ host }}"
username: "{{ user }}"
dev_os: "{{ os }}"
password: "{{ password }}"
optional_args:
port: "{{ port }}"
filter: ['bgp_neighbors']
register: napalm_facts
- name: "Check all BGP sessions are configured"
assert:
that:
- hostvars[item.value.attributes.connects_to_device]['interfaces'][item.value.attributes.connects_to_iface]['ipv6'][0].split('/')[0] in napalm_facts.ansible_facts.bgp_neighbors.global.peers
msg: "{{ '{}:{}'.format(item.value.attributes.connects_to_iface, item.value.attributes.connects_to_device) }} ({{ hostvars[item.value.attributes.connects_to_device]['interfaces'][item.value.attributes.connects_to_iface]['ipv6'][0].split('/')[0] }}) is missing"
with_dict: "{{ interfaces }}"
when: item.value.attributes.link_type == 'fabric'
tags: [print_action]
Let's take a look:
hostvars[item.value.attributes.connects_to_device]['interfaces']\
[item.value.attributes.connects_to_iface]['ipv6'][0].split('/')[0]\
in napalm_facts.ansible_facts.bgp_neighbors.global.peers
item.value # interface
interface.attributes.connects_to_device # peer
interface.attributes.connects_to_iface # peer_iface
napalm_facts.ansible_facts.bgp_neighbors.global.peers # my_configured_peers
hostvars[peer]['interfaces'][peer_iface]['ipv6'] in my_configured_peers
- name: "Check BGP sessions are healthy"
assert:
that:
- napalm_facts.ansible_facts.bgp_neighbors.global.peers[hostvars[item.value.attributes.connects_to_device]['interfaces'][item.value.attributes.connects_to_iface]['ipv6'][0].split('/')[0]].is_up
msg: "{{ '{}:{}'.format(item.value.attributes.connects_to_iface, item.value.attributes.connects_to_device) }} ({{ hostvars[item.value.attributes.connects_to_device]['interfaces'][item.value.attributes.connects_to_iface]['ipv6'][0].split('/')[0] }}) is configured but down"
with_dict: "{{ interfaces }}"
when: item.value.attributes.link_type == 'fabric'
tags: [print_action]
- name: "Check BGP sessions are receiving prefixes"
assert:
that:
- napalm_facts.ansible_facts.bgp_neighbors.global.peers[hostvars[item.value.attributes.connects_to_device]['interfaces'][item.value.attributes.connects_to_iface]['ipv6'][0].split('/')[0]].address_family.ipv6.received_prefixes > 0
msg: "{{ '{}:{}'.format(item.value.attributes.connects_to_iface, item.value.attributes.connects_to_device) }} ({{ hostvars[item.value.attributes.connects_to_device]['interfaces'][item.value.attributes.connects_to_iface]['ipv6'][0].split('/')[0] }}) is not receiving any prefixes"
with_dict: "{{ interfaces }}"
when: item.value.attributes.link_type == 'fabric'
tags: [print_action]
If 2001:db8:caf3::
is down in rtr01,
it's because 2001:db8:caf3::1
is not configured in rtr00.
Replace the YAML
files that we previously had with a backend (nsot).
➜ cat data/acme/devices.yml
---
devices:
rtr00:
os: eos
host: 127.0.0.1
domain: acme.com
user: vagrant
password: vagrant
port: '12443'
asn: '65001'
router_id: 10.1.1.1
rtr01:
os: junos
host: 127.0.0.1
domain: acme.com
user: vagrant
password: ''
port: '12203'
asn: '65002'
router_id: 10.1.1.2
data/acme/attributes.yml
---
attributes:
Device:
os: {}
host: {}
user: {}
password:
constraints:
allow_empty: true
port: {}
asn: {}
router_id: {}
domain: {}
Network:
type: {}
service: {}
Interface:
link_type: {}
connects_to_device: {}
connects_to_iface: {}
data/acme/services.yml
---
loopbacks:
network_ranges:
loopbacks: 2001:db8:feed::/48
definition: {}
ipfabric:
network_ranges:
fabric_links: 2001:db8:cafe::/48
definition:
links:
- left_device: rtr00
left_iface: et1
right_device: rtr01
right_iface: ge-0/0/1
- left_device: rtr00
left_iface: et2
right_device: rtr01
right_iface: ge-0/0/2
data/evil/devices.yml
---
devices:
evil00:
os: eos
host: 127.0.0.1
domain: evilcorp.com
user: vagrant
password: vagrant
port: '12443'
asn: '65666'
router_id: 10.6.66.1
evil01:
os: junos
host: 127.0.0.1
domain: evil.com
user: vagrant
password: ''
port: '12203'
asn: '65666'
router_id: 10.6.66.2
data/evil/attributes.yml
---
attributes:
Device:
os: {}
host: {}
user: {}
password:
constraints:
allow_empty: true
port: {}
asn: {}
router_id: {}
domain: {}
Network:
type: {}
service: {}
Interface:
link_type: {}
connects_to_device: {}
connects_to_iface: {}
data/evil/services.yml
---
loopbacks:
network_ranges:
loopbacks: 2001:db8:dead::/48
definition: {}
ipfabric:
network_ranges:
fabric_links: 2001:db8:c0ff::/48
definition:
links:
- left_device: evil00
left_iface: et1
right_device: evil01
right_iface: ge-0/0/1
- left_device: evil00
left_iface: et2
right_device: evil01
right_iface: ge-0/0/2
inv site.create --name evil
inv site.add_attributes -s evil -f data/evil/attributes.yml
inv site.add_devices -s evil -f data/evil/devices.yml
inv service.loopbacks -s evil -f data/evil/services.yml
inv service.ipfabric -s evil -f data/evil/services.yml