diff --git a/app/controllers/schedules_controller.rb b/app/controllers/schedules_controller.rb
index 61e7742..94a3320 100644
--- a/app/controllers/schedules_controller.rb
+++ b/app/controllers/schedules_controller.rb
@@ -1,34 +1,41 @@
class SchedulesController < ApplicationController
+ before_action :fetch_host, only: [:new, :create]
+
def new
- @schedule = Schedule.new
+ @schedule = @host.schedules.new
end
def show
end
def edit
end
def update
end
def create
- @schedule = Schedule.new(fetch_params)
+ @schedule = @host.schedules.new(fetch_params)
+
@schedule.runtime = params[:schedule][:runtime] if params[:schedule][:runtime]
if @schedule.save
- redirect_to root_path
+ redirect_to host_path(@host)
else
render :new
end
end
def destroy
end
private
+ def fetch_host
+ @host = Host.find(params[:host_id])
+ end
+
def fetch_params
params.require(:schedule).permit(:name)
end
end
diff --git a/app/models/host.rb b/app/models/host.rb
index 4d18e4a..33ce8ad 100644
--- a/app/models/host.rb
+++ b/app/models/host.rb
@@ -1,75 +1,77 @@
class Host < ActiveRecord::Base
establish_connection Baas::settings[:local_db]
FILE_RETENTION_DAYS = 60
JOB_RETENTION_DAYS = 180
CATALOG = 'MyCatalog'
AUTOPRUNE = 1
enum status: { draft: 0, pending: 1, config: 2, ready: 3 }
belongs_to :client, class_name: :Client, foreign_key: :name, primary_key: :name
+
has_many :filesets, dependent: :destroy
has_many :job_templates, dependent: :destroy
+ has_many :schedules, dependent: :destroy
validates :file_retention, :job_retention,
:port, :password, presence: true
validates :port, numericality: true
validates :fqdn, presence: true, uniqueness: true
validate :fqdn_format
scope :not_baculized, -> { where(baculized: false) }
before_validation :set_retention, :unset_baculized, :sanitize_name
def baculize_config
templates = job_templates.enabled.includes(:fileset, :schedule)
result = [self] + templates.map {|x| [x, x.fileset, x.schedule] }.flatten.compact.uniq
result.map(&:to_bacula_config_array)
end
def to_bacula_config_array
[
"Client {",
" Name = #{name}",
" Address = #{fqdn}",
" FDPort = #{port}",
" Catalog = #{CATALOG}",
" Password = \"#{password}\"",
" File Retention = #{file_retention} days",
" Job Retention = #{job_retention} days",
" AutoPrune = yes",
"}"
]
end
def auto_prune_human
AUTOPRUNE == 1 ? 'yes' : 'no'
end
private
def sanitize_name
self.name = fqdn
end
def set_retention
self.file_retention = FILE_RETENTION_DAYS
self.job_retention = JOB_RETENTION_DAYS
end
def unset_baculized
self.baculized = false if new_record?
true
end
def fqdn_format
regex = /(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)/
errors.add(:fqdn, :invalid) unless fqdn =~ regex
end
end
diff --git a/app/views/jobs/_form.html.erb b/app/views/jobs/_form.html.erb
--- a/app/views/jobs/_form.html.erb
+++ b/app/views/jobs/_form.html.erb
<%= f.text_field :name %>
<% if !@job.restore? %>
<% end %>
<%= f.text_field :restore_location, label: 'Restore Location', placeholder: '/tmp/bacula' %>
<% end %>
diff --git a/app/views/schedules/_form.html.erb b/app/views/schedules/_form.html.erb
index 12b0836..9d32e92 100644
--- a/app/views/schedules/_form.html.erb
+++ b/app/views/schedules/_form.html.erb
@@ -1,26 +1,26 @@
-<%= bootstrap_form_for(@schedule, layout: :horizontal,
+<%= bootstrap_form_for(@schedule, url: host_schedules_path(@host), layout: :horizontal,
label_col: 'col-xs-3', control_col: 'col-xs-8') do |f| %>
<% if @schedule.errors.any? %>
<%= pluralize(@schedule.errors.count, "error") %> prohibited this schedule from being saved:
<% @schedule.errors.full_messages.each do |message| %>
- <%= message %>
<% end %>
<% end %>
<%= f.text_field :name %>
<%= f.time_field :runtime, placeholder: 'HH:MM' %>
<% end %>
diff --git a/config/routes.rb b/config/routes.rb
index 7328271..17793cf 100644
--- a/config/routes.rb
+++ b/config/routes.rb
@@ -1,17 +1,16 @@
Rails.application.routes.draw do
resources :clients, only: [:index, :show]
resources :hosts, only: [:new, :create, :show, :edit, :update, :destroy] do
resources :jobs, only: [:new, :create, :show, :edit, :update, :destroy] do
member do
patch :toggle_enable
end
end
resources :filesets, only: [:show, :new, :create, :destroy]
+ resources :schedules, only: [:show, :new, :edit, :create, :update, :destroy]
end
- resources :schedules, only: [:show, :new, :edit, :create, :update, :destroy]
-
root 'clients#index'
end
diff --git a/db/migrate/20151102194922_add_host_id_to_schedules.rb b/db/migrate/20151102194922_add_host_id_to_schedules.rb
new file mode 100644
index 0000000..732a038
--- /dev/null
+++ b/db/migrate/20151102194922_add_host_id_to_schedules.rb
@@ -0,0 +1,11 @@
+class AddHostIdToSchedules < ActiveRecord::Migration
+ def connection
+ ActiveRecord::Base.establish_connection(Baas::settings[:local_db]).connection
+ end
+
+ def change
+ add_column :schedules, :host_id, :integer
+
+ add_index :schedules, :host_id
+ end
+end
diff --git a/db/schema.rb b/db/schema.rb
index 2af491d..317c627 100644
--- a/db/schema.rb
+++ b/db/schema.rb
@@ -1,382 +1,385 @@
# encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
-ActiveRecord::Schema.define(version: 20151101142440) do
+ActiveRecord::Schema.define(version: 20151102194922) do
create_table "BaseFiles", primary_key: "BaseId", force: true do |t|
t.integer "BaseJobId", null: false
t.integer "JobId", null: false
t.integer "FileId", limit: 8, null: false
t.integer "FileIndex"
end
add_index "BaseFiles", ["JobId"], name: "basefiles_jobid_idx", using: :btree
create_table "CDImages", primary_key: "MediaId", force: true do |t|
t.datetime "LastBurn", null: false
end
create_table "Client", primary_key: "ClientId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.binary "Uname", limit: 255, null: false
t.integer "AutoPrune", limit: 1, default: 0
t.integer "FileRetention", limit: 8, default: 0
t.integer "JobRetention", limit: 8, default: 0
end
add_index "Client", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "Counters", id: false, force: true do |t|
t.binary "Counter", limit: 255, null: false
t.integer "MinValue", default: 0
t.integer "MaxValue", default: 0
t.integer "CurrentValue", default: 0
t.binary "WrapCounter", limit: 255, null: false
end
create_table "Device", primary_key: "DeviceId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "StorageId", default: 0
t.integer "DevMounts", default: 0
t.integer "DevReadBytes", limit: 8, default: 0
t.integer "DevWriteBytes", limit: 8, default: 0
t.integer "DevReadBytesSinceCleaning", limit: 8, default: 0
t.integer "DevWriteBytesSinceCleaning", limit: 8, default: 0
t.integer "DevReadTime", limit: 8, default: 0
t.integer "DevWriteTime", limit: 8, default: 0
t.integer "DevReadTimeSinceCleaning", limit: 8, default: 0
t.integer "DevWriteTimeSinceCleaning", limit: 8, default: 0
t.datetime "CleaningDate"
t.integer "CleaningPeriod", limit: 8, default: 0
end
create_table "File", primary_key: "FileId", force: true do |t|
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
t.integer "DeltaSeq", limit: 2, default: 0
t.integer "MarkId", default: 0
t.binary "LStat", limit: 255, null: false
t.binary "MD5", limit: 255
end
add_index "File", ["JobId", "PathId", "FilenameId"], name: "JobId_2", using: :btree
add_index "File", ["JobId"], name: "JobId", using: :btree
create_table "FileSet", primary_key: "FileSetId", force: true do |t|
t.binary "FileSet", limit: 255, null: false
t.binary "MD5", limit: 255
t.datetime "CreateTime"
end
create_table "Filename", primary_key: "FilenameId", force: true do |t|
t.binary "Name", null: false
end
add_index "Filename", ["Name"], name: "Name", length: {"Name"=>255}, using: :btree
create_table "Job", primary_key: "JobId", force: true do |t|
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "Job", ["Name"], name: "Name", length: {"Name"=>128}, using: :btree
create_table "JobHisto", id: false, force: true do |t|
t.integer "JobId", null: false
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "JobHisto", ["JobId"], name: "JobId", using: :btree
add_index "JobHisto", ["StartTime"], name: "StartTime", using: :btree
create_table "JobMedia", primary_key: "JobMediaId", force: true do |t|
t.integer "JobId", null: false
t.integer "MediaId", null: false
t.integer "FirstIndex", default: 0
t.integer "LastIndex", default: 0
t.integer "StartFile", default: 0
t.integer "EndFile", default: 0
t.integer "StartBlock", default: 0
t.integer "EndBlock", default: 0
t.integer "VolIndex", default: 0
end
add_index "JobMedia", ["JobId", "MediaId"], name: "JobId", using: :btree
create_table "Location", primary_key: "LocationId", force: true do |t|
t.binary "Location", limit: 255, null: false
t.integer "Cost", default: 0
t.integer "Enabled", limit: 1
end
create_table "LocationLog", primary_key: "LocLogId", force: true do |t|
t.datetime "Date"
t.binary "Comment", null: false
t.integer "MediaId", default: 0
t.integer "LocationId", default: 0
t.string "NewVolStatus", limit: 9, null: false
t.integer "NewEnabled", limit: 1
end
create_table "Log", primary_key: "LogId", force: true do |t|
t.integer "JobId", default: 0
t.datetime "Time"
t.binary "LogText", null: false
end
add_index "Log", ["JobId"], name: "JobId", using: :btree
create_table "Media", primary_key: "MediaId", force: true do |t|
t.binary "VolumeName", limit: 255, null: false
t.integer "Slot", default: 0
t.integer "PoolId", default: 0
t.binary "MediaType", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "LabelType", limit: 1, default: 0
t.datetime "FirstWritten"
t.datetime "LastWritten"
t.datetime "LabelDate"
t.integer "VolJobs", default: 0
t.integer "VolFiles", default: 0
t.integer "VolBlocks", default: 0
t.integer "VolMounts", default: 0
t.integer "VolBytes", limit: 8, default: 0
t.integer "VolParts", default: 0
t.integer "VolErrors", default: 0
t.integer "VolWrites", default: 0
t.integer "VolCapacityBytes", limit: 8, default: 0
t.string "VolStatus", limit: 9, null: false
t.integer "Enabled", limit: 1, default: 1
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "InChanger", limit: 1, default: 0
t.integer "StorageId", default: 0
t.integer "DeviceId", default: 0
t.integer "MediaAddressing", limit: 1, default: 0
t.integer "VolReadTime", limit: 8, default: 0
t.integer "VolWriteTime", limit: 8, default: 0
t.integer "EndFile", default: 0
t.integer "EndBlock", default: 0
t.integer "LocationId", default: 0
t.integer "RecycleCount", default: 0
t.datetime "InitialWrite"
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.binary "Comment"
end
add_index "Media", ["PoolId"], name: "PoolId", using: :btree
add_index "Media", ["VolumeName"], name: "VolumeName", unique: true, length: {"VolumeName"=>128}, using: :btree
create_table "MediaType", primary_key: "MediaTypeId", force: true do |t|
t.binary "MediaType", limit: 255, null: false
t.integer "ReadOnly", limit: 1, default: 0
end
create_table "Path", primary_key: "PathId", force: true do |t|
t.binary "Path", null: false
end
add_index "Path", ["Path"], name: "Path", length: {"Path"=>255}, using: :btree
create_table "PathHierarchy", primary_key: "PathId", force: true do |t|
t.integer "PPathId", null: false
end
add_index "PathHierarchy", ["PPathId"], name: "pathhierarchy_ppathid", using: :btree
create_table "PathVisibility", id: false, force: true do |t|
t.integer "PathId", null: false
t.integer "JobId", null: false
t.integer "Size", limit: 8, default: 0
t.integer "Files", default: 0
end
add_index "PathVisibility", ["JobId"], name: "pathvisibility_jobid", using: :btree
create_table "Pool", primary_key: "PoolId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "NumVols", default: 0
t.integer "MaxVols", default: 0
t.integer "UseOnce", limit: 1, default: 0
t.integer "UseCatalog", limit: 1, default: 0
t.integer "AcceptAnyVolume", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "AutoPrune", limit: 1, default: 0
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.string "PoolType", limit: 9, null: false
t.integer "LabelType", limit: 1, default: 0
t.binary "LabelFormat", limit: 255
t.integer "Enabled", limit: 1, default: 1
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.integer "NextPoolId", default: 0
t.integer "MigrationHighBytes", limit: 8, default: 0
t.integer "MigrationLowBytes", limit: 8, default: 0
t.integer "MigrationTime", limit: 8, default: 0
end
add_index "Pool", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "RestoreObject", primary_key: "RestoreObjectId", force: true do |t|
t.binary "ObjectName", null: false
t.binary "RestoreObject", limit: 2147483647, null: false
t.binary "PluginName", limit: 255, null: false
t.integer "ObjectLength", default: 0
t.integer "ObjectFullLength", default: 0
t.integer "ObjectIndex", default: 0
t.integer "ObjectType", default: 0
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "ObjectCompression", default: 0
end
add_index "RestoreObject", ["JobId"], name: "JobId", using: :btree
create_table "Status", primary_key: "JobStatus", force: true do |t|
t.binary "JobStatusLong"
t.integer "Severity"
end
create_table "Storage", primary_key: "StorageId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "AutoChanger", limit: 1, default: 0
end
create_table "UnsavedFiles", primary_key: "UnsavedId", force: true do |t|
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
end
create_table "Version", id: false, force: true do |t|
t.integer "VersionId", null: false
end
create_table "filesets", force: true do |t|
t.string "name"
t.integer "host_id"
t.text "exclude_directions"
t.text "include_directions"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "filesets", ["host_id"], name: "index_filesets_on_host_id", using: :btree
create_table "hosts", force: true do |t|
t.binary "name", limit: 255, null: false
t.binary "fqdn", limit: 255, null: false
t.integer "port", null: false
t.integer "file_retention", null: false
t.integer "job_retention", null: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "password"
t.boolean "baculized", default: false, null: false
t.datetime "baculized_at"
t.integer "status", limit: 1, default: 0
t.integer "client_id"
end
add_index "hosts", ["name"], name: "index_hosts_on_name", unique: true, length: {"name"=>128}, using: :btree
create_table "job_templates", force: true do |t|
t.string "name", null: false
t.integer "job_type", limit: 1
t.integer "host_id"
t.integer "fileset_id"
t.integer "schedule_id"
t.datetime "created_at"
t.datetime "updated_at"
t.boolean "enabled", default: false
t.binary "restore_location"
end
create_table "schedules", force: true do |t|
- t.string "name"
- t.string "runs"
+ t.string "name"
+ t.string "runs"
+ t.integer "host_id"
end
+ add_index "schedules", ["host_id"], name: "index_schedules_on_host_id", using: :btree
+
create_table "users", force: true do |t|
t.string "username", null: false
t.string "email"
t.integer "user_type", limit: 1, null: false
t.boolean "enabled", default: false
t.datetime "created_at"
t.datetime "updated_at"
end
end
diff --git a/spec/controllers/schedules_controller_spec.rb b/spec/controllers/schedules_controller_spec.rb
index ec56863..a6ccdb8 100644
--- a/spec/controllers/schedules_controller_spec.rb
+++ b/spec/controllers/schedules_controller_spec.rb
@@ -1,54 +1,79 @@
require 'spec_helper'
describe SchedulesController do
+ let(:host) { FactoryGirl.create(:host) }
+
describe 'GET #new' do
- before { get :new }
+ before { get :new, host_id: host.id }
it 'initializes a schedule' do
expect(assigns(:schedule)).to be
end
+ it 'sets the schedule\'s host' do
+ expect(assigns(:schedule).host).to eq(host)
+ end
+
it 'renders' do
expect(response).to render_template(:new)
end
end
describe 'POST #create' do
context 'with valid params' do
let(:params) do
{
+ host_id: host.id,
schedule: { name: FactoryGirl.build(:schedule).name, runtime: '19:17' }
}
end
it 'creates the schedule' do
expect { post :create, params }.
- to change { Schedule.count }.by(1)
+ to change { host.schedules(true).count }.by(1)
end
- it 'redirects to root' do
+ it 'redirects to host' do
post :create, params
- expect(response).to redirect_to(root_path)
+ expect(response).to redirect_to(host_path(host))
+ end
+ end
+
+ context 'with invalid host' do
+ it 'raises not found error' do
+ expect {
+ post :create, { host_id: -1, schedule: { invalid: true } }
+ }.to raise_error(ActiveRecord::RecordNotFound)
end
end
context 'with invalid params' do
- let(:params) { { schedule: { invalide: :foo } } }
+ let(:params) do
+ {
+ host_id: host.id,
+ schedule: { invalide: :foo }
+ }
+ end
it 'initializes a schedule with errors' do
post :create, params
expect(assigns(:schedule)).to be
end
it 'does not create the schedule' do
expect { post :create, params }.
to_not change { Schedule.count }
end
it 'renders :new' do
post :create, params
expect(response).to render_template(:new)
end
+
+ it 'assigns the host to schedule' do
+ post :create, params
+ expect(assigns(:schedule).host).to eq(host)
+ end
end
end
end
diff --git a/spec/factories/schedule.rb b/spec/factories/schedule.rb
index 715d081..f4c49a3 100644
--- a/spec/factories/schedule.rb
+++ b/spec/factories/schedule.rb
@@ -1,8 +1,9 @@
FactoryGirl.define do
factory :schedule do
+ host
sequence(:name) { |n| "Schedule #{n}" }
runs ['Level=Full 1st sun at 2:05',
'Level=Differential 2nd-5th sun at 2:05',
'Level=Incremental mon-sat at 2:05']
end
end
diff --git a/spec/routing/schedule_routing_spec.rb b/spec/routing/schedule_routing_spec.rb
index af03985..c8476ec 100644
--- a/spec/routing/schedule_routing_spec.rb
+++ b/spec/routing/schedule_routing_spec.rb
@@ -1,27 +1,33 @@
require 'spec_helper'
describe SchedulesController do
- it 'routes GET /schedules/new' do
- expect(get('/schedules/new')).to route_to( { controller: 'schedules', action: 'new'})
+ it 'routes GET /hosts/:host_id/schedules/new' do
+ expect(get('/hosts/1/schedules/new')).
+ to route_to(controller: 'schedules', action: 'new', host_id: '1')
end
- it 'routes POST /schedules' do
- expect(post('/schedules')).to route_to( { controller: 'schedules', action: 'create'})
+ it 'routes POST /hosts/:host_id/schedules' do
+ expect(post('/hosts/1/schedules')).
+ to route_to(controller: 'schedules', action: 'create', host_id: '1')
end
- it 'routes GET /schedules/1' do
- expect(get('/schedules/1')).to route_to( { controller: 'schedules', action: 'show', id: '1' })
+ it 'routes GET /hosts/:host_id/schedules/:id' do
+ expect(get('/hosts/1/schedules/2')).
+ to route_to(controller: 'schedules', action: 'show', host_id: '1', id: '2')
end
- it 'routes GET /schedules/1/edit' do
- expect(get('/schedules/1/edit')).to route_to( { controller: 'schedules', action: 'edit', id: '1' })
+ it 'routes GET /hosts/:host_id/schedules/:id/edit' do
+ expect(get('/hosts/1/schedules/2/edit')).
+ to route_to(controller: 'schedules', action: 'edit', host_id: '1', id: '2')
end
- it 'routes PUT /schedules/1' do
- expect(put('/schedules/1')).to route_to( { controller: 'schedules', action: 'update', id: '1' })
+ it 'routes PUT /hosts/:host_id/schedules/:id' do
+ expect(put('/hosts/1/schedules/2')).
+ to route_to(controller: 'schedules', action: 'update', host_id: '1', id: '2')
end
- it 'routes DELETE /schedules/1' do
- expect(delete('/schedules/1')).to route_to( { controller: 'schedules', action: 'destroy', id: '1' })
+ it 'routes DELETE /hosts/:host_id/schedules/:id' do
+ expect(delete('/hosts/1/schedules/2')).
+ to route_to(controller: 'schedules', action: 'destroy', host_id: '1', id: '2')
end
end