Merge pull request #2 from arshiagg/netmaster
Netmaster Sidecar Added
dseevr authored Aug 9, 2017
2 parents 5ffe4fb + 284cf25 commit 56b8dd0
Showing 4 changed files with 96 additions and 11 deletions.
3 changes: 2 additions & 1 deletion contiv-grafana.yml
@@ -75,4 +75,5 @@ spec:
ports:
- protocol: TCP
port: 3000
- nodePort: 30001
+ nodePort: 32701

2 changes: 1 addition & 1 deletion contiv-prometheus.yml
@@ -82,4 +82,4 @@ spec:
ports:
- protocol: TCP
port: 9090
- nodePort: 30000
+ nodePort: 32700
98 changes: 91 additions & 7 deletions exporter.rb
@@ -7,19 +7,43 @@
OVS_DB_PORT = 6640
REQUIRED_KEYS = %w[external_ids name statistics]

def netplugin?
ENV.fetch("EXPORTER_MODE") == "netplugin"
end

def netmaster?
!netplugin?
end

set :bind, '0.0.0.0'
set :port, netplugin? ? 9004 : 9005
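
# Usage sketch (assuming the sidecar image runs this file directly; the
# invocation below is illustrative). EXPORTER_MODE is required: ENV.fetch
# raises KeyError when it is unset.
#
#   EXPORTER_MODE=netplugin ruby exporter.rb   # per-endpoint OVS stats on :9004
#   EXPORTER_MODE=netmaster ruby exporter.rb   # cluster-level counts on :9005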

get '/metrics' do
# get etcd address (host:port) from CONTIV_ETCD
etcd = ENV.fetch("CONTIV_ETCD").split("//").last

# get netmaster leader address
puts "fetching netmaster leader address"
- netmaster = JSON.parse(HTTParty.get("http://#{etcd}/v2/keys/contiv.io/lock/netmaster/leader").body)["node"]["value"]
+ netmaster_addr = JSON.parse(HTTParty.get("http://#{etcd}/v2/keys/contiv.io/lock/netmaster/leader").body)["node"]["value"]
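
# Rough shape of the etcd v2 keys response parsed above (field names follow
# the etcd v2 API; the leader value itself is illustrative):
#
#   {"action":"get",
#    "node":{"key":"/contiv.io/lock/netmaster/leader",
#            "value":"192.168.2.10:9999", ...}}
#
# so ["node"]["value"] yields the current netmaster leader as "host:port".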

# Get a list of networks
puts "fetching networks"
- raw_networks = JSON.parse(HTTParty.get("http://#{netmaster}/api/v1/networks/").body)
+ raw_networks = JSON.parse(HTTParty.get("http://#{netmaster_addr}/api/v1/networks/").body)

to_display = []

if netplugin?
to_display = netplugin_stats(netmaster_addr, raw_networks)
end

if netmaster?
to_display = netmaster_stats(netmaster_addr, raw_networks)
end

to_display.join("\n") + "\n"
end
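
# Both helpers return an array of metric lines; joining them with newlines
# produces the plain-text exposition format that Prometheus scrapes.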

def netplugin_stats(netmaster_addr, raw_networks)

networks = []
epInfo = {}
@@ -34,7 +58,7 @@
# Get endpoints and endpoint info for each network
networks.each do |net|
puts "fetching #{net} network data"
- raw_epstats = JSON.parse(HTTParty.get("http://#{netmaster}/api/v1/inspect/networks/#{net}/").body)
+ raw_epstats = JSON.parse(HTTParty.get("http://#{netmaster_addr}/api/v1/inspect/networks/#{net}/").body)

tenant = raw_epstats["Config"]["tenantName"]
network = raw_epstats["Config"]["networkName"]
@@ -44,21 +68,25 @@
endptID = ep["endpointID"]
host = ep["homingHost"]
container = ep["containerName"]
epg = ep["serviceName"]

# create hash of endpointID to hash of endpoint info
epInfo[endptID] = {
"tenant": tenant,
"network": network,
"endpointID": endptID,
"host": host,
"containerName": container,
}

# add epg if endpoint is part of one
if epg
epInfo[endptID]["endpointGroup"] = epg
end
end
end
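
# At this point each epInfo entry looks roughly like (values illustrative):
#
#   { tenant: "default", network: "contiv-net", endpointID: "0f1a...",
#     host: "node-1", containerName: "web-1", "endpointGroup" => "g1" }
#
# Note the "tenant": keys become Symbols, while endpointGroup is added under
# a String key; both interpolate the same way when the label string is built below.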

puts "epInfo:"
puts epInfo.inspect


# get ovs stats
cmd = "ovs-vsctl --db=tcp:127.0.0.1:#{OVS_DB_PORT} list interface | egrep '^name|external_ids|statistics'"
@@ -100,6 +128,22 @@
# get stats into hash
epstats = interface["statistics"].split(":").last.scan(/(\w+)=(\d+)/).to_h
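# ovs-vsctl typically prints a line like
#   statistics          : {rx_bytes=978, rx_packets=9, tx_bytes=648}
# which the line above reduces to {"rx_bytes"=>"978", "rx_packets"=>"9", "tx_bytes"=>"648"}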


# OVS reports stats from its own point of view, not the container's,
# so rx and tx must be swapped
new_hash = {}

epstats.keys.each do |key|
if key.start_with?("rx_")
new_hash[key.gsub("rx_", "tx_")] = epstats[key]
elsif key.start_with?("tx_")
new_hash[key.gsub("tx_", "rx_")] = epstats[key]
else
new_hash[key] = epstats[key]
end
end

epstats = new_hash
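
# e.g. {"rx_bytes"=>"978", "tx_bytes"=>"648"} becomes
#      {"tx_bytes"=>"978", "rx_bytes"=>"648"}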
# build the label string and append one metric line per stat to the records array
info = "{" + epInfo[key].map{|k,v| "#{k}=\"#{v}\""}.join(", ") + "}"
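# info is a Prometheus-style label set built from the endpoint metadata, e.g.:
#   {tenant="default", network="contiv-net", endpointID="0f1a...", host="node-1", containerName="web-1"}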
epstats.each do |metric, value|
@@ -109,6 +153,46 @@
end
end

# return key-value pairs
- records.join("\n") + "\n"
+ records
end

def netmaster_stats(netmaster_addr, raw_networks)

records = []
# Get tenants and endpoint groups
puts "fetching data"
raw_tenants = JSON.parse(HTTParty.get("http://#{netmaster_addr}/api/v1/tenants/").body)
raw_epg = JSON.parse(HTTParty.get("http://#{netmaster_addr}/api/v1/endpointGroups/").body)

records << "count_of_tenants #{raw_tenants.length}"

# count of networks per tenant
raw_tenants.each do |tenant_block|
tenant = tenant_block["tenantName"]
data = tenant_block["link-sets"]
if data["Networks"]
records << "count_of_networks{tenant=\"#{tenant}\"} #{data["Networks"].length}"
end
end

# count of epg per (tenant, network)
raw_networks.each do |network_block|
data = network_block["link-sets"]
if data["EndpointGroups"]
records << "count_of_endpointGroups{tenant=\"#{network_block["tenantName"]}\", network=\"#{network_block["networkName"]}\"} #{data["EndpointGroups"].length}"
end
end

# count of policies per (tenant, network, epg)
raw_epg.each do |epg_block|
data = epg_block["link-sets"]
if data["Policies"]
records << "count_of_policies{tenant=\"#{epg_block["tenantName"]}\", network=\"#{epg_block["networkName"]}\", endpointGroup=\"#{epg_block["groupName"]}\"} #{data["Policies"].length}"
end
if data["NetProfiles"]
records << "count_of_netprofiles{tenant=\"#{epg_block["tenantName"]}\", network=\"#{epg_block["networkName"]}\", endpointGroup=\"#{epg_block["groupName"]}\"} #{data["NetProfiles"].length}"
end
end
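
# The records built above are plain Prometheus gauge lines, e.g. (values illustrative):
#
#   count_of_tenants 2
#   count_of_networks{tenant="default"} 3
#   count_of_endpointGroups{tenant="default", network="contiv-net"} 2
#   count_of_policies{tenant="default", network="contiv-net", endpointGroup="g1"} 1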

records
end
4 changes: 2 additions & 2 deletions prometheus.yml
@@ -1,8 +1,8 @@
# Prometheus configuration to scrape all netplugin endpoints every 5 seconds
scrape_configs:
- job_name: 'kubernetes-pods'
- scrape_interval: 15s
- scrape_timeout: 10s
+ scrape_interval: 5s
+ scrape_timeout: 5s

kubernetes_sd_configs:
- role: pod
