Page Menu · Home · GRNET

No One · Temporary

File Metadata

Created
Thu, Apr 24, 3:18 PM
diff --git a/app/models/host.rb b/app/models/host.rb
new file mode 100644
index 0000000..641c1ae
--- /dev/null
+++ b/app/models/host.rb
@@ -0,0 +1,51 @@
+class Host < ActiveRecord::Base
+ FILE_RETENTION_DAYS = 60
+ JOB_RETENTION_DAYS = 180
+ CATALOG = 'MyCatalog'
+
+ establish_connection :local_development
+
+ validates :file_retention, :job_retention,
+ :port, :password, presence: true
+ validates :port, numericality: true
+
+ validates :name, presence: true, uniqueness: true
+
+ validate :fqdn_format
+
+ before_validation :set_retention, :unset_baculized
+
+ def to_bacula_config_array
+ [
+ "Client {",
+ " Name = #{name}",
+ " Address = #{fqdn}",
+ " FDPort = #{port}",
+ " Catalog = #{CATALOG}",
+ " Password = \"#{password}\"",
+ " File Retention = #{file_retention} days",
+ " Job Retention = #{job_retention} days",
+ " AutoPrune = yes",
+ "}"
+ ]
+ end
+
+ private
+
+ def set_retention
+ self.file_retention = FILE_RETENTION_DAYS
+ self.job_retention = JOB_RETENTION_DAYS
+ end
+
+ def unset_baculized
+ self.baculized = false
+ true
+ end
+
+ def fqdn_format
+ regex = /(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)/
+ unless fqdn =~ regex
+ self.errors.add(:fqdn)
+ end
+ end
+end
diff --git a/db/migrate/20151024162823_create_hosts_table.rb b/db/migrate/20151024162823_create_hosts_table.rb
new file mode 100644
index 0000000..6e587e1
--- /dev/null
+++ b/db/migrate/20151024162823_create_hosts_table.rb
@@ -0,0 +1,19 @@
# Creates the `hosts` table in the application's own database (not the
# Bacula catalog database -- see the #connection override below).
class CreateHostsTable < ActiveRecord::Migration
  # Direct this migration at the local application database instead of the
  # default connection.
  def connection
    ActiveRecord::Base.establish_connection(Baas::settings[:local_db]).connection
  end

  def change
    create_table :hosts do |t|
      # Binary columns with a prefix-length index follow the same
      # conventions as the Bacula catalog's own tables (cf. `Client.Name`
      # in db/schema.rb).
      t.binary :name, limit: 255, null: false
      t.binary :fqdn, limit: 255, null: false
      t.integer :port, null: false
      t.integer :file_retention, null: false
      t.integer :job_retention, null: false

      t.timestamps
    end

    # Prefix-length index (128 bytes) keeps the unique index within MySQL
    # key-size limits for the binary column.
    add_index :hosts, :name, unique: true, length: { name: 128}, using: :btree
  end
end
diff --git a/db/migrate/20151025070113_add_password_to_hosts.rb b/db/migrate/20151025070113_add_password_to_hosts.rb
new file mode 100644
index 0000000..d05c7b0
--- /dev/null
+++ b/db/migrate/20151025070113_add_password_to_hosts.rb
@@ -0,0 +1,9 @@
# Adds the Bacula file-daemon password to hosts.
class AddPasswordToHosts < ActiveRecord::Migration
  # Direct this migration at the local application database (same override
  # as the other host migrations).
  def connection
    ActiveRecord::Base.establish_connection(Baas::settings[:local_db]).connection
  end

  def change
    # Stored as plain text: Host#to_bacula_config_array interpolates the
    # password verbatim into the generated Bacula Client resource, so a
    # one-way digest cannot be used here. Access to this column should be
    # treated as sensitive.
    add_column :hosts, :password, :string
  end
end
diff --git a/db/migrate/20151025110412_add_baculized_field_to_hosts.rb b/db/migrate/20151025110412_add_baculized_field_to_hosts.rb
new file mode 100644
index 0000000..9c36227
--- /dev/null
+++ b/db/migrate/20151025110412_add_baculized_field_to_hosts.rb
@@ -0,0 +1,10 @@
# Tracks whether (and when) a host's configuration has been deployed to
# Bacula.
class AddBaculizedFieldToHosts < ActiveRecord::Migration
  # Direct this migration at the local application database (same override
  # as the other host migrations).
  def connection
    ActiveRecord::Base.establish_connection(Baas::settings[:local_db]).connection
  end

  def change
    # Defaults to false: hosts start out not yet registered with Bacula.
    add_column :hosts, :baculized, :boolean, default: false, null: false
    add_column :hosts, :baculized_at, :datetime
  end
end
diff --git a/db/schema.rb b/db/schema.rb
index 4ce3129..2708ead 100644
--- a/db/schema.rb
+++ b/db/schema.rb
@@ -1,337 +1,352 @@
# encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
-ActiveRecord::Schema.define(version: 20151020192733) do
+ActiveRecord::Schema.define(version: 20151025110412) do
create_table "BaseFiles", primary_key: "BaseId", force: true do |t|
t.integer "BaseJobId", null: false
t.integer "JobId", null: false
t.integer "FileId", limit: 8, null: false
t.integer "FileIndex"
end
add_index "BaseFiles", ["JobId"], name: "basefiles_jobid_idx", using: :btree
create_table "CDImages", primary_key: "MediaId", force: true do |t|
t.datetime "LastBurn", null: false
end
create_table "Client", primary_key: "ClientId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.binary "Uname", limit: 255, null: false
t.integer "AutoPrune", limit: 1, default: 0
t.integer "FileRetention", limit: 8, default: 0
t.integer "JobRetention", limit: 8, default: 0
end
add_index "Client", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "Counters", id: false, force: true do |t|
t.binary "Counter", limit: 255, null: false
t.integer "MinValue", default: 0
t.integer "MaxValue", default: 0
t.integer "CurrentValue", default: 0
t.binary "WrapCounter", limit: 255, null: false
end
create_table "Device", primary_key: "DeviceId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "StorageId", default: 0
t.integer "DevMounts", default: 0
t.integer "DevReadBytes", limit: 8, default: 0
t.integer "DevWriteBytes", limit: 8, default: 0
t.integer "DevReadBytesSinceCleaning", limit: 8, default: 0
t.integer "DevWriteBytesSinceCleaning", limit: 8, default: 0
t.integer "DevReadTime", limit: 8, default: 0
t.integer "DevWriteTime", limit: 8, default: 0
t.integer "DevReadTimeSinceCleaning", limit: 8, default: 0
t.integer "DevWriteTimeSinceCleaning", limit: 8, default: 0
t.datetime "CleaningDate"
t.integer "CleaningPeriod", limit: 8, default: 0
end
create_table "File", primary_key: "FileId", force: true do |t|
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
t.integer "DeltaSeq", limit: 2, default: 0
t.integer "MarkId", default: 0
t.binary "LStat", limit: 255, null: false
t.binary "MD5", limit: 255
end
add_index "File", ["JobId", "PathId", "FilenameId"], name: "JobId_2", using: :btree
add_index "File", ["JobId"], name: "JobId", using: :btree
create_table "FileSet", primary_key: "FileSetId", force: true do |t|
t.binary "FileSet", limit: 255, null: false
t.binary "MD5", limit: 255
t.datetime "CreateTime"
end
create_table "Filename", primary_key: "FilenameId", force: true do |t|
t.binary "Name", null: false
end
add_index "Filename", ["Name"], name: "Name", length: {"Name"=>255}, using: :btree
create_table "Job", primary_key: "JobId", force: true do |t|
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "Job", ["Name"], name: "Name", length: {"Name"=>128}, using: :btree
create_table "JobHisto", id: false, force: true do |t|
t.integer "JobId", null: false
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "JobHisto", ["JobId"], name: "JobId", using: :btree
add_index "JobHisto", ["StartTime"], name: "StartTime", using: :btree
create_table "JobMedia", primary_key: "JobMediaId", force: true do |t|
t.integer "JobId", null: false
t.integer "MediaId", null: false
t.integer "FirstIndex", default: 0
t.integer "LastIndex", default: 0
t.integer "StartFile", default: 0
t.integer "EndFile", default: 0
t.integer "StartBlock", default: 0
t.integer "EndBlock", default: 0
t.integer "VolIndex", default: 0
end
add_index "JobMedia", ["JobId", "MediaId"], name: "JobId", using: :btree
create_table "Location", primary_key: "LocationId", force: true do |t|
t.binary "Location", limit: 255, null: false
t.integer "Cost", default: 0
t.integer "Enabled", limit: 1
end
create_table "LocationLog", primary_key: "LocLogId", force: true do |t|
t.datetime "Date"
t.binary "Comment", null: false
t.integer "MediaId", default: 0
t.integer "LocationId", default: 0
t.string "NewVolStatus", limit: 9, null: false
t.integer "NewEnabled", limit: 1
end
create_table "Log", primary_key: "LogId", force: true do |t|
t.integer "JobId", default: 0
t.datetime "Time"
t.binary "LogText", null: false
end
add_index "Log", ["JobId"], name: "JobId", using: :btree
create_table "Media", primary_key: "MediaId", force: true do |t|
t.binary "VolumeName", limit: 255, null: false
t.integer "Slot", default: 0
t.integer "PoolId", default: 0
t.binary "MediaType", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "LabelType", limit: 1, default: 0
t.datetime "FirstWritten"
t.datetime "LastWritten"
t.datetime "LabelDate"
t.integer "VolJobs", default: 0
t.integer "VolFiles", default: 0
t.integer "VolBlocks", default: 0
t.integer "VolMounts", default: 0
t.integer "VolBytes", limit: 8, default: 0
t.integer "VolParts", default: 0
t.integer "VolErrors", default: 0
t.integer "VolWrites", default: 0
t.integer "VolCapacityBytes", limit: 8, default: 0
t.string "VolStatus", limit: 9, null: false
t.integer "Enabled", limit: 1, default: 1
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "InChanger", limit: 1, default: 0
t.integer "StorageId", default: 0
t.integer "DeviceId", default: 0
t.integer "MediaAddressing", limit: 1, default: 0
t.integer "VolReadTime", limit: 8, default: 0
t.integer "VolWriteTime", limit: 8, default: 0
t.integer "EndFile", default: 0
t.integer "EndBlock", default: 0
t.integer "LocationId", default: 0
t.integer "RecycleCount", default: 0
t.datetime "InitialWrite"
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.binary "Comment"
end
add_index "Media", ["PoolId"], name: "PoolId", using: :btree
add_index "Media", ["VolumeName"], name: "VolumeName", unique: true, length: {"VolumeName"=>128}, using: :btree
create_table "MediaType", primary_key: "MediaTypeId", force: true do |t|
t.binary "MediaType", limit: 255, null: false
t.integer "ReadOnly", limit: 1, default: 0
end
create_table "Path", primary_key: "PathId", force: true do |t|
t.binary "Path", null: false
end
add_index "Path", ["Path"], name: "Path", length: {"Path"=>255}, using: :btree
create_table "PathHierarchy", primary_key: "PathId", force: true do |t|
t.integer "PPathId", null: false
end
add_index "PathHierarchy", ["PPathId"], name: "pathhierarchy_ppathid", using: :btree
create_table "PathVisibility", id: false, force: true do |t|
t.integer "PathId", null: false
t.integer "JobId", null: false
t.integer "Size", limit: 8, default: 0
t.integer "Files", default: 0
end
add_index "PathVisibility", ["JobId"], name: "pathvisibility_jobid", using: :btree
create_table "Pool", primary_key: "PoolId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "NumVols", default: 0
t.integer "MaxVols", default: 0
t.integer "UseOnce", limit: 1, default: 0
t.integer "UseCatalog", limit: 1, default: 0
t.integer "AcceptAnyVolume", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "AutoPrune", limit: 1, default: 0
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.string "PoolType", limit: 9, null: false
t.integer "LabelType", limit: 1, default: 0
t.binary "LabelFormat", limit: 255
t.integer "Enabled", limit: 1, default: 1
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.integer "NextPoolId", default: 0
t.integer "MigrationHighBytes", limit: 8, default: 0
t.integer "MigrationLowBytes", limit: 8, default: 0
t.integer "MigrationTime", limit: 8, default: 0
end
add_index "Pool", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "RestoreObject", primary_key: "RestoreObjectId", force: true do |t|
t.binary "ObjectName", null: false
t.binary "RestoreObject", limit: 2147483647, null: false
t.binary "PluginName", limit: 255, null: false
t.integer "ObjectLength", default: 0
t.integer "ObjectFullLength", default: 0
t.integer "ObjectIndex", default: 0
t.integer "ObjectType", default: 0
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "ObjectCompression", default: 0
end
add_index "RestoreObject", ["JobId"], name: "JobId", using: :btree
create_table "Status", primary_key: "JobStatus", force: true do |t|
t.binary "JobStatusLong"
t.integer "Severity"
end
create_table "Storage", primary_key: "StorageId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "AutoChanger", limit: 1, default: 0
end
create_table "UnsavedFiles", primary_key: "UnsavedId", force: true do |t|
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
end
create_table "Version", id: false, force: true do |t|
t.integer "VersionId", null: false
end
+ create_table "hosts", force: true do |t|
+ t.binary "name", limit: 255, null: false
+ t.binary "fqdn", limit: 255, null: false
+ t.integer "port", null: false
+ t.integer "file_retention", null: false
+ t.integer "job_retention", null: false
+ t.datetime "created_at"
+ t.datetime "updated_at"
+ t.string "password"
+ t.boolean "baculized", default: false, null: false
+ t.datetime "baculized_at"
+ end
+
+ add_index "hosts", ["name"], name: "index_hosts_on_name", unique: true, length: {"name"=>128}, using: :btree
+
create_table "users", force: true do |t|
t.string "username", null: false
t.string "email"
t.integer "user_type", limit: 1, null: false
t.boolean "enabled", default: false
t.datetime "created_at"
t.datetime "updated_at"
end
end
diff --git a/spec/factories/host.rb b/spec/factories/host.rb
new file mode 100644
index 0000000..df18bc3
--- /dev/null
+++ b/spec/factories/host.rb
@@ -0,0 +1,10 @@
# Factory for Host records used throughout the specs.
FactoryGirl.define do
  factory :host do
    # The sequence counter alone guarantees uniqueness; the original also
    # mixed in rand(1000), which made test data non-deterministic (and thus
    # failures harder to reproduce) without adding any uniqueness.
    sequence(:fqdn) {|n| "lala.#{n}.gr" }
    password 'a strong password'
    port 1234
    file_retention 1
    job_retention 2
    baculized false
  end
end
diff --git a/spec/models/host_spec.rb b/spec/models/host_spec.rb
new file mode 100644
index 0000000..3292fc0
--- /dev/null
+++ b/spec/models/host_spec.rb
@@ -0,0 +1,70 @@
require 'spec_helper'

describe Host do
  context 'validates' do
    # name and password have no before_validation defaults, so a bare
    # Host.new must yield exactly one (presence) error on each.
    [:name, :password].each do |field|
      it "presence of #{field}" do
        expect(Host.new).to have(1).errors_on(field)
      end
    end

    it 'numericality of :port' do
      # NOTE(review): two errors are expected here -- presumably ActiveRecord
      # casts the non-numeric :lala to nil on the integer column, triggering
      # both the presence and the numericality validations. Verify against
      # the Rails version in use.
      expect(Host.new(port: :lala)).to have(2).errors_on(:port)
    end

    # file_retention / job_retention are filled in by the set_retention
    # before_validation callback, so they are present even on a bare record.
    [:file_retention, :job_retention].each do |field|
      it "#{field} is set automatically" do
        host = Host.new
        host.valid?
        expect(host.send(field)).to be_present
      end
    end
  end

  context 'when fqdn is invalid' do
    let(:host) { FactoryGirl.build(:host, fqdn: :lala) }

    it 'has errors' do
      expect(host).to have(1).errors_on(:fqdn)
    end
  end

  describe '#to_bacula_config_array' do
    let(:host) { FactoryGirl.create(:host) }

    # Each example below pins one line of the generated Bacula Client
    # resource; together they cover every directive the model emits.
    it "is a valid client directive" do
      expect(host.to_bacula_config_array).to include('Client {')
      expect(host.to_bacula_config_array).to include('}')
    end

    it "contains Address directive" do
      expect(host.to_bacula_config_array).to include(" Address = #{host.fqdn}")
    end

    it "contains FDPort directive" do
      expect(host.to_bacula_config_array).to include(" FDPort = #{host.port}")
    end

    it "contains Catalog directive" do
      expect(host.to_bacula_config_array).to include(" Catalog = #{Host::CATALOG}")
    end

    it "contains Password directive" do
      expect(host.to_bacula_config_array).to include(" Password = \"#{host.password}\"")
    end

    it "contains File Retention directive" do
      expect(host.to_bacula_config_array).
        to include(" File Retention = #{host.file_retention} days")
    end

    it "contains Job Retention directive" do
      expect(host.to_bacula_config_array).
        to include(" Job Retention = #{host.job_retention} days")
    end

    it "contains AutoPrune directive" do
      expect(host.to_bacula_config_array).to include(" AutoPrune = yes")
    end
  end
end

Event Timeline