diff --git a/dnsworker/bin/dns-worker b/dnsworker/bin/dns-worker
new file mode 100755
index 0000000..9f93094
--- /dev/null
+++ b/dnsworker/bin/dns-worker
@@ -0,0 +1,50 @@
+#!/usr/bin/env ruby
+$:.unshift File.expand_path('../../lib', File.realpath(__FILE__))
+
+require 'yaml'
+require 'optparse'
+require 'ostruct'
+
+require 'dnsworker/worker'
+
+options = OpenStruct.new
+options.once = false
+options.dry_run = false
+options.extra = nil
+
+OptionParser.new do |opts|
+  opts.banner = 'Usage: dns-worker [options]'
+
+  opts.on('-c', '--config CONFIG', 'Config file') do |c|
+    options[:config] = c
+  end
+
+  opts.on('-e', '--extra CONFIG', 'Extra config file') do |e|
+    options[:extra] = e
+  end
+
+  opts.on('-n', '--dry-run', 'Run but do not execute or mark anything') do |n|
+    options[:dry_run] = n
+  end
+
+  opts.on('-o', '--once', 'Run once') do |o|
+    options[:once] = o
+  end
+
+  opts.on('-d', '--cmdline-dispatch TYPE:JSON') do |d|
+    options[:cmdline_type], options[:cmdline_body] = d.split(':', 2)
+
+    options.cmdline_body = JSON.parse(options.cmdline_body, symbolize_names: true) if options.cmdline_body
+  end
+
+end.parse!
+
+cfg = YAML.load_file(options.config)
+cfg.merge!(YAML.load_file(options.extra)) if options.extra
+
+w = DNSWorker::Worker.new(cfg)
+if options.cmdline_type
+  w.cmdline(options.cmdline_type, options.cmdline_body)
+else
+  w.work(once: options.once, dry_run: options.dry_run)
+end
diff --git a/dnsworker/bin/ds-monitor b/dnsworker/bin/ds-monitor
new file mode 100755
index 0000000..fd3025e
--- /dev/null
+++ b/dnsworker/bin/ds-monitor
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+
+import argparse
+import sys
+import dns
+from dns import name, resolver, flags, rdtypes
+import struct
+import time
+from collections import defaultdict
+from sets import Set
+import subprocess
+
+class DSValidator(object):
+
+    def __init__(self, domain):
+        self.resolver = resolver.Resolver()
+        self.resolver.use_edns(0, flags.DO, 4096)
+        self.domain = name.from_text(domain, name.root)
+
+    def soa(self):
+        return resolver.zone_for_name(
+            self.domain,
+            tcp=True,
+            resolver=self.resolver)
+
+    def get_ds(self):
+        try:
+            answers = self.resolver.query(self.domain, 'DS')
+        except resolver.NoAnswer:
+            answers = []
+        except resolver.NoNameservers:
+            answers = []
+
+        return answers
+
+    def validateDS(self, dss):
+        from itertools import groupby
+        # groupby only groups consecutive items, so sort by the grouping key first
+        by_key = lambda ds: (ds.key_tag, ds.algorithm)
+        upstream = dict((k, list(g)) for k, g in groupby(sorted(self.get_ds(), key=by_key), by_key))
+        dss = dict((k, list(g)) for k, g in groupby(sorted(dss, key=by_key), by_key))
+
+        missing = []
+        for (key_tag, algo), records in dss.iteritems():
+            if (key_tag, algo) not in upstream:
+                missing.append((key_tag, algo))
+                continue
+
+            ups_records = Set([ds.to_text() for ds in upstream[(key_tag, algo)]])
+            records = Set([ds.to_text() for ds in records])
+
+            # A single match is needed for a (key_tag, algo) pair
+            if records & ups_records:
+                continue
+
+            missing.append((key_tag, algo))
+
+        return missing
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Check for OpenDNSSEC ds-seen status')
+    args = parser.parse_args()
+
+    ds_per_domain = defaultdict(list)
+    cmd = '/usr/bin/ods-ksmutil key export -e ready -t ksk --ds'.split()
+    for line in subprocess.check_output(cmd).splitlines():
+        if not line.strip() or line[0] == ';':
+            continue
+
+        domain, ttl, _f, rtype, rdata = line.split(None, 4)
+        if _f != 'IN' or rtype != 'DS':
+            continue
+
+        if domain == 'example.com.':
+            print 'skip example'
+            continue
+
+        in_class = dns.rdataclass.IN
+        ds_type = dns.rdatatype.DS
+        ds = dns.rdata.from_text(in_class, ds_type, rdata)
+
+        ds_per_domain[domain].append(ds)
+
+    for domain, dss in ds_per_domain.items():
+        finder = DSValidator(domain)
+        missing = finder.validateDS(dss)
+        if not missing:
+            keys = Set([ds.key_tag for ds in dss])
+
+            for key in keys:
+                cmd = "/usr/bin/ods-ksmutil key ds-seen -z %s -x %s" % (domain, key)
+                print "%s" % cmd
+                subprocess.check_output(cmd.split())  # check on double run
+            continue
+
+        for miss in missing:
+            print "%s ds-missing '%s'" % (finder.domain, miss)
+
+if __name__ == '__main__':
+    main()
diff --git a/dnsworker/bin/ds-schedule b/dnsworker/bin/ds-schedule
new file mode 100755
index 0000000..1441709
--- /dev/null
+++ b/dnsworker/bin/ds-schedule
@@ -0,0 +1,105 @@
+#!/usr/bin/env ruby
+
+require 'uri'
+require 'rack/utils'
+require 'net/http'
+require 'open3'
+
+class DSPusher
+  attr_reader :cfg
+
+  def initialize(cfg)
+    @cfg = cfg
+  end
+
+  def push_ds(zone=nil, dry_run=false)
+    fetch_dss(zone).each { |d, dss|
+      post_dss(d, dss, dry_run)
+    }
+  end
+
+  def fetch_dss(zone)
+    dss = Hash.new { |h, k| h[k] = [] }
+
+    cmd = cfg['get_ds']
+    cmd += " -z #{zone}" if zone
+
+    out, err = command(cmd)
+    out.each_line { |line|
+      line.strip!
+
+      next if line.nil? || line == ''
+      next if line.start_with?(';')
+      domain, _ttl, _f, rtype, rdata = line.split(/\s+/, 5)
+      next unless rtype == 'DS'
+
+      domain = domain.gsub(/\.+$/, '') # Remove trailing .
+      dss[domain] << rdata
+    }
+
+    dss
+  end
+
+  def post_dss(domain, dss, dry_run)
+    query = Rack::Utils.build_nested_query(
+      domain: domain,
+      event: 'push_ds',
+      args: dss,
+    )
+    uri = URI("#{cfg['webdns_base']}#{cfg['update_state']}" % { query: query })
+    if dry_run
+      p [:uri, uri]
+      return
+    end
+
+    Net::HTTP.start(uri.host, uri.port) do |http|
+      resp = http.request Net::HTTP::Put.new(uri.request_uri)
+
+      fail "push_ds failed for #{domain} (HTTP #{resp.code})" if resp.code != '200'
+    end
+  end
+
+  private
+
+  def command(cmd)
+    out, err, status = Open3.capture3(cmd)
+    raise "Command error err='#{err.strip}' out='#{out.strip}'" if !status.success?
+
+    [out, err]
+  end
+end
+
+require 'yaml'
+require 'optparse'
+require 'ostruct'
+
+options = OpenStruct.new
+options.dry_run = false
+options.extra = nil
+options.zone = nil
+
+OptionParser.new do |opts|
+  opts.banner = 'Usage: ds-schedule [options]'
+
+  opts.on('-c', '--config CONFIG', 'Config file') do |c|
+    options[:config] = c
+  end
+
+  opts.on('-e', '--extra CONFIG', 'Extra config file') do |e|
+    options[:extra] = e
+  end
+
+  opts.on('-n', '--dry-run', 'Run but do not execute anything') do |n|
+    options[:dry_run] = n
+  end
+
+  opts.on('-z', '--zone ZONE', 'Zone') do |z|
+    options[:zone] = z
+  end
+end.parse!
+
+cfg = YAML.load_file(options.config)
+cfg.merge!(YAML.load_file(options.extra)) if options.extra
+
+w = DSPusher.new(cfg)
+w.push_ds(options.zone, options.dry_run)
diff --git a/dnsworker/cron b/dnsworker/cron
new file mode 100644
index 0000000..5899e4f
--- /dev/null
+++ b/dnsworker/cron
@@ -0,0 +1,2 @@
+* * * * * root /srv/dnsworker/bin/ds-schedule -c /etc/dnsworker/cfg.yml
+* * * * * root /srv/dnsworker/bin/ds-monitor
diff --git a/dnsworker/dnsworker.service b/dnsworker/dnsworker.service
new file mode 100644
index 0000000..8d5ae8a
--- /dev/null
+++ b/dnsworker/dnsworker.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=WebDNS Worker
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+ExecStart=/srv/dnsworker/bin/dns-worker -c /etc/dnsworker/cfg.yml
+
+[Install]
+WantedBy=multi-user.target
diff --git a/dnsworker/fabfile.py b/dnsworker/fabfile.py
new file mode 100644
index 0000000..6da9aa8
--- /dev/null
+++ b/dnsworker/fabfile.py
@@ -0,0 +1,32 @@
+from fabric.api import env, cd
+from fabric.operations import run, put, sudo
+
+env.hosts = ['dnssec-edet4.grnet.gr']
+
+def setup():
+    put('dnsworker.service', '/etc/systemd/system/', use_sudo=True)
+    sudo('systemctl daemon-reload')
+    sudo('systemctl enable dnsworker.service')
+
+def check():
+    run('test -d /run/systemd/system')
+    run('test -f /etc/dnsworker/cfg.yml')
+    run('systemctl is-enabled dnsworker.service')
+
+def restart():
+    sudo('systemctl restart dnsworker.service')
+
+def install_cron():
+    put('cron', '/etc/cron.d/dnsworker', use_sudo=True)
+
+def copy():
+    sudo('mkdir -p /srv/dnsworker')
+    with cd('/srv/dnsworker'):
+        put('lib', '.', use_sudo=True)
+        put('bin', '.', use_sudo=True, mode=0755)
+
+def deploy():
+    check()
+    restart()
+    install_cron()
+
diff --git a/dnsworker/lib/dnsworker.rb b/dnsworker/lib/dnsworker.rb
new file mode 100644
index 0000000..c47e4e6
--- /dev/null
+++ b/dnsworker/lib/dnsworker.rb
@@ -0,0 +1,2 @@
+module DNSWorker
+end
diff --git a/dnsworker/lib/dnsworker/base_worker.rb b/dnsworker/lib/dnsworker/base_worker.rb
new file mode 100644
index 0000000..ea26cac
--- /dev/null
+++ b/dnsworker/lib/dnsworker/base_worker.rb
@@ -0,0 +1,134 @@
+require 'json'
+require 'mysql2'
+require 'open3'
+
+module DNSWorker::BaseWorker
+  class JobFailed < StandardError; end
+  class Retry < StandardError; end
+  class CmdFailed < StandardError; end
+
+  attr_reader :client
+  attr_reader :cfg
+  attr_reader :dry_run
+  attr_accessor :working
+
+  # Start consuming jobs.
+  #
+  # Handles reconnects.
+  def work(opts = {})
+    @dry_run = opts[:dry_run]
+
+    register_signals
+    opts[:once] ? run : watch
+  end
+
+  # Gracefully stop the worker.
+  #
+  # If no job is running, stop immediately.
+  def stop
+    if working
+      @stop = true
+    else
+      exit
+    end
+  end
+
+  private
+
+  def stop?
+    @stop
+  end
+
+  def system(cmd)
+    p [:cmd, cmd]
+    Kernel.system(cmd)
+  end
+
+  def register_signals
+    trap('INT') { stop }
+    trap('TERM') { stop }
+  end
+
+  def watch
+    loop do
+      procline('watching')
+      break if stop?
+
+      run
+
+      sleep(cfg['timeout'])
+    end
+  end
+
+  def run
+    self.working = true
+    jobs = client.query('select * from jobs where status in (0, 2) order by id asc').to_a
+    jobs.group_by { |j| j[:domain_id] }.each { |domain_id, jobs|
+      process_jobs(domain_id, jobs)
+    }
+    self.working = false
+  end
+
+  def process_jobs(domain_id, jobs)
+    catch(:stop_process) do
+      jobs.each { |job|
+        if job[:status] == 2 # Stop processing this zone when a failed job exists
+          $stderr.puts "Not processing domain=#{domain_id} because a failed job exists jobid=#{job[:id]} |#{job}|"
+          throw :stop_process
+        end
+        dispatch(job)
+      }
+    end
+  end
+
+  def procline(line)
+    $0 = "dnsworker-#{line}"
+  end
+
+  def initialize(mysql_cfg)
+    my_cfg = mysql_cfg.merge(reconnect: true, symbolize_keys: true)
+    @client = Mysql2::Client.new(my_cfg)
+  end
+
+  def dispatch(job)
+    id, jtype, jargs = job.values_at(:id, :job_type, :args)
+    jargs = JSON.parse(jargs, symbolize_names: true)
+
+    procline("working on jobid=#{id} #{jtype} #{jargs}")
+    $stderr.puts "working on jobid=#{id} #{jtype} #{jargs}"
+    send(jtype, jargs) unless dry_run
+    mark_done(id)
+
+  rescue Retry
+    mark_retry(id)
+    throw :stop_process
+  rescue CmdFailed
+    mark_fail(id)
+    throw :stop_process
+  rescue JobFailed
+    mark_fail(id)
+    throw :stop_process
+  end
+
+  def mark_done(id)
+    $stderr.puts :done
+    client.query("update jobs set status=1 where id=#{id}") unless dry_run
+  end
+
+  def mark_fail(id)
+    $stderr.puts :fail
+    client.query("update jobs set status=2 where id=#{id}") unless dry_run
+  end
+
+  def mark_retry(id)
+    $stderr.puts :retry
+    client.query("update jobs set retries = retries + 1 where id=#{id}") unless dry_run
+  end
+
+  def cmd(command)
+    out, err, status = Open3.capture3(command)
+    raise CmdFailed if !status.success?
+
+    [out, err]
+  end
+end
diff --git a/dnsworker/lib/dnsworker/pushers/base.rb b/dnsworker/lib/dnsworker/pushers/base.rb
new file mode 100644
index 0000000..f599b4a
--- /dev/null
+++ b/dnsworker/lib/dnsworker/pushers/base.rb
@@ -0,0 +1,24 @@
+module DNSWorker
+  module Pushers
+    class Base
+      attr_reader :cfg
+
+      def initialize(cfg, debug=false)
+        @cfg = cfg
+        @debug = debug
+      end
+
+      def log(this)
+        $stderr.puts(this) if debug?
+      end
+
+      def debug?
+        @debug
+      end
+
+      def replace_ds(parent, zone, dss)
+        raise NotImplementedError
+      end
+    end
+  end
+end
diff --git a/dnsworker/lib/dnsworker/pushers/papaki.rb b/dnsworker/lib/dnsworker/pushers/papaki.rb
new file mode 100644
index 0000000..0934d71
--- /dev/null
+++ b/dnsworker/lib/dnsworker/pushers/papaki.rb
@@ -0,0 +1,105 @@
+#!/usr/bin/env ruby
+require 'json'
+require 'faraday_middleware'
+
+class DNSWorker::Pushers::Papaki < DNSWorker::Pushers::Base
+
+  def replace_ds(_parent, domain, dss)
+    dss = prepare_dss(dss)
+    current = current_ds(domain)
+
+    # Papaki needs an empty publicKey attribute
+    to_add = (dss - current).map { |ds| ds['publicKey'] = ''; ds }
+    to_remove = (current - dss).map { |ds| ds['publicKey'] = ''; ds }
+
+    p [:add, to_add]
+    p [:rem, to_remove]
+    return true if to_add.empty? && to_remove.empty?
+
+    req = {
+      type: 'managednssec',
+      domainname: domain,
+    }
+
+    req['dnssectoadd'] = { ds: dss } if dss.any?
+    req['dnssectoremove'] = { ds: current } if current.any?
+    request(req)
+
+    true
+  end
+
+  def current_ds(domain)
+    resp = request(type: 'dnssecinfo', domainname: domain)
+
+    # Remove unnecessary keys
+    resp['dsrecords'].map { |ds|
+      key, alg, digest = ds.values_at('keyTag', 'alg', 'digest')
+      Hash['keyTag', key, 'alg', alg, 'digest', digest.downcase]
+    }
+  end
+
+  private
+
+  def client
+    @client ||= client!
+  end
+
+  def client!
+    Faraday.new cfg['papaki_host'] do |conn|
+      conn.response :json
+
+      #conn.response :logger
+      conn.adapter Faraday.default_adapter
+    end
+  end
+
+
+  def prepare_dss(dss)
+    # Papaki seems to accept specific digest algorithms based on the DNSKEY algo
+    # This map guides that choice.
+    #
+    alg_digest_map = {
+      '5' => '1', # RSA/SHA-1 => SHA1
+      '7' => '1', # RSASHA1-NSEC3-SHA1 => SHA1
+      '8' => '2', # RSA/SHA-256 => SHA-256
+      '10' => '2', # RSA/SHA-512 => SHA-256
+    }
+    ds_for_papaki = []
+
+    dss.each { |ds_line|
+      # Example DS rdata: 11845 8 1 f781fc2422bf265b5606d7dc095d15183014ee6a
+      key, alg, digest_type, digest = ds_line.split
+
+      # Send only digest types supported by papaki
+      next if digest_type != alg_digest_map[alg]
+
+      ds_for_papaki << Hash['keyTag', key, 'alg', alg, 'digest', digest.downcase]
+    }
+
+    ds_for_papaki
+  end
+
+  def ds_info(domain)
+    request(type: 'dnssecinfo', domainname: domain)
+  end
+
+  def request(data)
+    resp = client.post cfg['papaki_endpoint'] do |r|
+      r.headers['Content-Type'] = 'application/x-www-form-urlencoded'
+      data_with_key = data.merge(apiKey: cfg['papaki_key'])
+      r.body = JSON.dump({request: data_with_key})
+    end
+
+    puts "\n* Request #{data[:do] || data[:type]} for #{data[:domainname]}"
+    data.each { |k,v|
+      puts "#{k}: #{v}"
+    }
+    puts "\n* Response #{data[:do] || data[:type]} for #{data[:domainname]}"
+    resp.body['response'].tap { |r|
+      raise "#{r['code']}: #{r['message']}" if r['code'] != '1000'
+      r.to_a.sort.each { |k, v|
+        puts "#{k}: #{v}"
+      }
+    }
+  end
+end
diff --git a/dnsworker/lib/dnsworker/pushers/webdns.rb b/dnsworker/lib/dnsworker/pushers/webdns.rb
new file mode 100644
index 0000000..dbd5cc9
--- /dev/null
+++ b/dnsworker/lib/dnsworker/pushers/webdns.rb
@@ -0,0 +1,25 @@
+#!/usr/bin/env ruby
+require 'json'
+require 'net/http'
+require 'rack/utils'
+require 'uri'
+
+class DNSWorker::Pushers::Webdns < DNSWorker::Pushers::Base
+  def replace_ds(parent, zone, dss)
+    query = Rack::Utils.build_nested_query(
+      child: zone,
+      parent: parent,
+      ds: dss,
+    )
+
+    uri = URI(cfg.values_at('webdns_base', 'webdns_replace_ds').join % { query: query })
+
+    Net::HTTP.start(uri.host, uri.port) do |http|
+      resp = http.request Net::HTTP::Put.new(uri.request_uri)
+
+      return false if resp.code != '200'
+    end
+
+    true
+  end
+end
diff --git a/dnsworker/lib/dnsworker/worker.rb b/dnsworker/lib/dnsworker/worker.rb
new file mode 100755
index 0000000..d643f5e
--- /dev/null
+++ b/dnsworker/lib/dnsworker/worker.rb
@@ -0,0 +1,109 @@
+require 'json'
+require 'net/http'
+require 'uri'
+require 'pp'
+
+require 'rack/utils'
+
+require 'dnsworker'
+require 'dnsworker/base_worker'
+require 'dnsworker/pushers/base'
+require 'dnsworker/pushers/papaki'
+require 'dnsworker/pushers/webdns'
+
+class DNSWorker::Worker
+  include DNSWorker::BaseWorker
+
+  Pushers = Hash[
+    :papaki, DNSWorker::Pushers::Papaki,
+    :webdns, DNSWorker::Pushers::Webdns,
+  ]
+
+  def initialize(cfg)
+    @cfg = cfg
+    super(cfg['mysql'])
+  end
+
+  def add_domain(params)
+    params[:master] = cfg['hidden_master']
+    cmd(cfg['bind_add'] % params)
+  end
+
+  def remove_domain(params)
+    cmd(cfg['bind_del'] % params)
+  end
+
+  def opendnssec_add(params)
+    cmd(cfg['ods_add'] % params)
+  end
+
+  def opendnssec_remove(params)
+    cmd(cfg['ods_del'] % params)
+  end
+
+  def bind_convert_to_dnssec(params)
+    fail Retry if !File.exist? File.join(cfg['zone_root'], 'signed', params[:zone])
+
+    # Remove zone and re-add it as a master zone
+    remove_domain(params)
+    cmd(cfg['bind_add_dnssec'] % params)
+  end
+
+  # The zone is signed, waiting for the ksk to become ready
+  def wait_for_ready_to_push_ds(params)
+    out, _err = cmd(cfg['ready_to_push_ds'] % params)
+
+    fail Retry unless out['ds-seen']
+  end
+
+  def publish_ds(params)
+    pub_cls = Pushers[params[:dnssec_parent_authority].to_sym]
+    fail JobFailed unless pub_cls
+
+    pub = pub_cls.new(cfg)
+
+    fail JobFailed unless pub.replace_ds(params[:dnssec_parent], params[:zone], params[:dss])
+  end
+
+  def wait_for_active(params)
+    keytag = params[:keytag]
+    out, _err = cmd(cfg['key_activated'] % params)
+    key_lines = out.each_line.select { |line| line.start_with?(params[:zone]) }
+
+    # Check if the key is activated
+    return if key_lines.any? {|kl|
+      # example
+      # KSK active 2016-12-12 18:41:33 (retire) 2048 8 b70042f966e5f01deb2e988607ad67ba SoftHSM 60076
+
+      kl.strip!
+      _domain, _type, status, _rest = kl.split(/\s+/, 4)
+
+      status == 'active' and _rest.end_with?(keytag)
+    }
+
+    fail Retry
+  end
+
+  def trigger_event(params)
+    query = Rack::Utils.build_query(domain: params[:zone], event: params[:event])
+    uri = URI(cfg.values_at('webdns_base', 'update_state').join % { query: query })
+
+    Net::HTTP.start(uri.host, uri.port) do |http|
+      resp = http.request Net::HTTP::Put.new(uri.request_uri)
+
+      fail JobFailed if resp.code != '200'
+      ok = JSON.parse(resp.body)['ok']
+      fail JobFailed if !ok
+    end
+  end
+
+  # NOTE: cmdline must stay public; bin/dns-worker calls it directly for --cmdline-dispatch.
+
+  def cmdline(jtype, jargs)
+    if jargs
+      send(jtype, jargs)
+    else
+      send(jtype)
+    end
+  end
+end
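
Notes (not part of the patch):

The worker and the two cron scripts only read configuration keys; no sample
/etc/dnsworker/cfg.yml ships with this change. The sketch below is an
illustrative guess assembled from the keys the code looks up (mysql, timeout,
hidden_master, zone_root, the bind_*/ods_* command templates, get_ds, the
webdns_*/update_state endpoints and the papaki_* credentials). Every value is
a placeholder, not a tested configuration. The command templates are expanded
with Ruby's String#% using %{zone}, %{master} and similar keys, and
update_state/webdns_replace_ds must contain a %{query} slot.

    # Hypothetical cfg.yml -- keys match the code above, values are made up.
    mysql:
      host: 127.0.0.1
      username: dnsworker
      password: changeme
      database: webdns

    timeout: 10                     # seconds between queue polls (watch loop)
    hidden_master: 10.0.0.1         # interpolated into bind_add by add_domain
    zone_root: /var/lib/dnsworker   # bind_convert_to_dnssec checks <zone_root>/signed/<zone>

    bind_add: "bind-add-slave-zone %{zone} %{master}"
    bind_del: "bind-del-zone %{zone}"
    bind_add_dnssec: "bind-add-master-zone %{zone}"
    ods_add: "ods-ksmutil zone add -z %{zone}"
    ods_del: "ods-ksmutil zone delete -z %{zone}"
    ready_to_push_ds: "ods-ksmutil key list -v -z %{zone}"  # output checked for 'ds-seen'
    key_activated: "ods-ksmutil key list -v -z %{zone}"     # output checked for an 'active' KSK line
    get_ds: "ods-ksmutil key export -e ready -t ksk --ds"

    webdns_base: "http://webdns.example.net"
    update_state: "/api/update_state?%{query}"
    webdns_replace_ds: "/api/replace_ds?%{query}"

    papaki_host: "https://api.papaki.example"
    papaki_endpoint: "/api/endpoint"
    papaki_key: "changeme"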
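
The queue contract is implicit in base_worker.rb: the worker polls a jobs
table, picks up rows with status 0 (pending) or 2 (failed, which halts further
processing for that zone), and dispatches each row by calling the public
worker method named by job_type with the JSON-decoded args hash; mark_done and
mark_fail set status to 1 and 2, and mark_retry bumps retries when a handler
raises Retry. The producing side lives in the WebDNS application and is not
part of this patch, so the enqueue sketch below is hypothetical: the column
names come from the queries above, while the column types and the rest of the
schema are assumptions.

    # Hypothetical producer sketch (Ruby) -- schema details are assumptions.
    require 'json'
    require 'mysql2'

    client = Mysql2::Client.new(host: '127.0.0.1', username: 'webdns', database: 'webdns')

    # Arguments for worker.rb#publish_ds; the zone and DS record are made up.
    args = JSON.dump(zone: 'example.org',
                     dnssec_parent: 'org',
                     dnssec_parent_authority: 'webdns',
                     dss: ['31406 8 2 <sha256-digest-in-hex>'])

    # status: 0 = pending, 1 = done, 2 = failed; retries is bumped on Retry.
    client.query("INSERT INTO jobs (domain_id, job_type, args, status, retries)" \
                 " VALUES (42, 'publish_ds', '#{client.escape(args)}', 0, 0)")

For ad-hoc testing the same dispatch path is reachable without the database
through bin/dns-worker -c cfg.yml -d 'publish_ds:{"zone":"example.org",...}';
note that --dry-run is not consulted on this path, so the handler really runs.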
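
Adding another registrar backend is a matter of subclassing
DNSWorker::Pushers::Base and registering the class in the Pushers hash in
worker.rb; publish_ds picks the backend from the zone's
dnssec_parent_authority and expects replace_ds(parent, zone, dss) to return
true on success. A minimal hypothetical sketch (the ExampleRegistrar name and
its behaviour are invented):

    # Hypothetical third backend -- illustrative only.
    class DNSWorker::Pushers::ExampleRegistrar < DNSWorker::Pushers::Base
      # Same contract as pushers/base.rb: return true on success.
      def replace_ds(parent, zone, dss)
        log("pushing #{dss.size} DS records for #{zone} under #{parent}")
        # ... call the registrar's API here, using credentials from cfg ...
        true
      end
    end

    # and in worker.rb:
    #   Pushers = Hash[
    #     :papaki, DNSWorker::Pushers::Papaki,
    #     :webdns, DNSWorker::Pushers::Webdns,
    #     :example_registrar, DNSWorker::Pushers::ExampleRegistrar,
    #   ]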