diff --git a/app/assets/javascripts/hosts.js b/app/assets/javascripts/hosts.js
index 3b2d2a0..0a2dfde 100644
--- a/app/assets/javascripts/hosts.js
+++ b/app/assets/javascripts/hosts.js
@@ -1,5 +1,8 @@
$(document).ready(function() {
if ($('select#host_fqdn').size() > 0) {
$('#host_fqdn').chosen();
}
+ if ($('select#host_email_recipients').size() > 0) {
+ $('#host_email_recipients').chosen();
+ }
});
diff --git a/app/controllers/hosts_controller.rb b/app/controllers/hosts_controller.rb
index eb05f8d..f306d02 100644
--- a/app/controllers/hosts_controller.rb
+++ b/app/controllers/hosts_controller.rb
@@ -1,176 +1,177 @@
class HostsController < ApplicationController
before_action :require_logged_in
before_action :fetch_host, only: [:show, :edit, :update, :destroy, :submit_config,
:revoke, :disable]
before_action :fetch_hosts_of_user, only: [:new, :edit, :create]
# GET /hosts/new
def new
@host = Host.new
@host.port = 9102
+ @host.email_recipients = [current_user.email]
end
# POST /hosts
def create
@host = Host.new(fetch_params)
set_host_type
@host.verified = !@host.institutional?
if user_can_add_this_host? && @host.save
flash[:success] = 'Host created successfully'
current_user.hosts << @host
UserMailer.notify_admin(current_user, @host.fqdn).deliver
redirect_to host_path @host
else
flash[:error] = 'Host was not created'
render :new
end
end
# GET /hosts/1
def show
@schedules = @host.job_templates.map(&:schedule)
@filesets = @host.job_templates.map(&:fileset)
end
# GET /hosts/1/edit
def edit; end
# PATCH /hosts/1
#
# Updates only the whitelisted mutable attributes (port, password,
# email_recipients); fqdn is intentionally not updatable here.
def update
- updates = fetch_params.slice(:port, :password)
+ updates = fetch_params.slice(:port, :password, :email_recipients)
if updates.present? && @host.update_attributes(updates)
@host.recalculate if @host.bacula_ready?
flash[:success] = 'Host updated successfully. You must update your file daemon accordingly.'
redirect_to host_path @host
else
render :edit
end
end
# DELETE /hosts/1
def destroy
if @host.destroy
flash[:success] = 'Host destroyed successfully'
else
flash[:error] = 'Host not destroyed'
end
redirect_to root_path
end
# POST /hosts/1/disable
def disable
if @host.disable_jobs_and_update
flash[:success] = 'Client disabled'
else
flash[:error] = 'Something went wrong, try again later'
end
redirect_to host_path(@host)
end
# POST /hosts/1/submit_config
def submit_config
if @host.dispatch_to_bacula
flash[:success] = 'Host configuration sent to Bacula successfully'
else
flash[:error] = 'Something went wrong, try again later'
end
redirect_to host_path(@host)
end
# DELETE /hosts/1/revoke
def revoke
if @host.remove_from_bacula
flash[:success] = 'Host configuration removed from Bacula successfully'
else
flash[:error] = 'Something went wrong, try again later'
end
redirect_to root_path
end
# GET /hosts/fetch_vima_hosts
def fetch_vima_hosts
if params[:code].blank?
return redirect_to client.auth_code.authorize_url(:redirect_uri => redirect_uri,
scope: 'read')
end
access_token = client.auth_code.get_token(
params['code'],
{ :redirect_uri => redirect_uri },
{ :mode => :query, :param_name => "access_token", :header_format => "" }
)
vms = access_token.get(
'https://vima.grnet.gr/instances/list?tag=vima:service:archiving',
{ mode: :query, param_name: 'access_token' }
).parsed.deep_symbolize_keys[:response][:instances]
session[:vms] = vms.first(50)
current_user.temp_hosts = vms
current_user.hosts_updated_at = Time.now
current_user.save
Host.where(fqdn: vms).each do |host|
host.users << current_user unless host.users.include?(current_user)
end
redirect_to new_host_path
end
private
def client
OAuth2::Client.new(
Rails.application.secrets.oauth2_vima_client_id,
Rails.application.secrets.oauth2_vima_secret,
site: 'https://vima.grnet.gr',
token_url: "/o/token",
authorize_url: "/o/authorize",
:ssl => {:ca_path => "/etc/ssl/certs"}
)
end
def redirect_uri
uri = URI.parse(request.url)
uri.scheme = 'https' unless Rails.env.development?
uri.path = '/hosts/fetch_vima_hosts'
uri.query = nil
uri.to_s
end
def fetch_hosts_of_user
return if not current_user.needs_host_list?
@hosts_of_user = session[:vms] - current_user.hosts.pluck(:fqdn)
end
def fetch_host
@host = current_user.hosts.includes(job_templates: [:fileset, :schedule]).find(params[:id])
end
def fetch_params
- params.require(:host).permit(:fqdn, :port, :password)
+ params.require(:host).permit(:fqdn, :port, :password, email_recipients: [])
end
def user_can_add_this_host?
!current_user.needs_host_list? || @hosts_of_user.include?(@host.fqdn)
end
def set_host_type
@host.origin = if current_user.vima?
:vima
elsif current_user.okeanos?
:okeanos
else
:institutional
end
end
end
diff --git a/app/models/host.rb b/app/models/host.rb
index 0f6a8a5..a5b7c05 100644
--- a/app/models/host.rb
+++ b/app/models/host.rb
@@ -1,298 +1,311 @@
# The bacula database must be independent from all of our application logic.
# For this reason we have Host which is the application equivalent of a Bacula Client.
#
# A host is being created from our application. When it receives all the configuration
# which is required it gets dispatched to bacula through some configuration files. After
# that, a client with the exact same config is generated by bacula.
class Host < ActiveRecord::Base
include Configuration::Host
STATUSES = {
pending: 0,
configured: 1,
dispatched: 2,
deployed: 3,
updated: 4,
redispatched: 5,
for_removal: 6,
inactive: 7,
blocked: 8
}
enum origin: { institutional: 0, vima: 1, okeanos: 2 }
+ serialize :email_recipients, JSON
has_many :ownerships
has_many :users, through: :ownerships, inverse_of: :hosts
has_many :invitations
belongs_to :client, class_name: :Client, foreign_key: :name, primary_key: :name
belongs_to :verifier, class_name: :User, foreign_key: :verifier_id, primary_key: :id
has_many :filesets, dependent: :destroy
has_many :job_templates, dependent: :destroy
has_many :schedules, dependent: :destroy
validates :file_retention, :job_retention,
:port, :password, presence: true
validates :port, numericality: true
validates :fqdn, presence: true, uniqueness: true
validate :fqdn_format
+ validate :valid_recipients
+
scope :not_baculized, -> {
joins("left join Client on Client.Name = hosts.name").where(Client: { Name: nil })
}
scope :in_bacula, -> {
where(
status: STATUSES.select { |k,_|
[:deployed, :updated, :redispatched, :for_removal].include? k
}.values
)
}
scope :unverified, -> { where(verified: false) }
- before_validation :set_retention, :unset_baculized, :sanitize_name
+ before_validation :set_retention, :unset_baculized, :sanitize_name, :sanitize_email_recipients
state_machine :status, initial: :pending do
STATUSES.each do |status_name, value|
state status_name, value: value
end
after_transition [:dispatched, :redispatched, :configured, :updated] => :deployed do |host|
host.job_templates.enabled.
update_all(baculized: true, baculized_at: Time.now, updated_at: Time.now)
end
event :add_configuration do
transition [:pending, :dispatched, :inactive] => :configured
end
event :dispatch do
transition :configured => :dispatched
end
event :redispatch do
transition :updated => :redispatched
end
event :set_deployed do
transition [:dispatched, :redispatched, :configured, :updated] => :deployed
end
event :change_deployed_config do
transition [:deployed, :redispatched, :for_removal] => :updated
end
event :mark_for_removal do
transition [:dispatched, :deployed, :updated, :redispatched] => :for_removal
end
event :set_inactive do
transition [:deployed, :dispatched, :updated, :redispatched] => :inactive
end
event :disable do
transition all => :pending
end
event :block do
transition all - [:blocked] => :blocked
end
event :unblock do
transition :blocked => :pending
end
end
# Determines if a host has enabled jobs in order to be dispatched to Bacula
#
# @return [Boolean]
def bacula_ready?
job_templates.enabled.any?
end
# Shows the host's auto_prune setting
def auto_prune_human
client_settings[:autoprune]
end
# Uploads the host's config to bacula
# Reloads bacula server
#
# It updates the host's status accordingly
def dispatch_to_bacula
return false if not needs_dispatch?
bacula_handler.deploy_config
end
# Removes a Host from bacula configuration.
# Reloads bacula server
#
# If all go well it changes the host's status and returns true
#
# @param force[Boolean] forces removal
def remove_from_bacula(force=false)
return false if not (force || needs_revoke?)
bacula_handler.undeploy_config
end
# Restores a host's backup to a preselected location
#
# @param fileset_id[Integer] the desired fileset
# @param location[String] the desired restore location
# @param restore_point[Datetime] the desired restore_point datetime
def restore(file_set_id, location, restore_point=nil)
return false if not restorable?
job_ids = client.get_job_ids(file_set_id, restore_point)
file_set_name = FileSet.find(file_set_id).file_set
bacula_handler.restore(job_ids, file_set_name, restore_point, location)
end
# Runs the given backup job ASAP
def backup_now(job_name)
bacula_handler.backup_now(job_name)
end
# Disables all jobs and sends the configuration to Bacula
def disable_jobs_and_update
job_templates.update_all(enabled: false)
bacula_handler.deploy_config
end
# Disables all jobs if needed and then locks the host
def disable_jobs_and_lock
return false if can_set_inactive? && !disable_jobs_and_update
block
end
# Determines whether a host:
#
# * has all it takes to be deployed but
# * the config is not yet sent to bacula
#
# @return [Boolean]
def needs_dispatch?
verified? && (can_dispatch? || can_redispatch?)
end
# Determines whether a host is marked for removal
#
# @return [Boolean]
def needs_revoke?
for_removal?
end
# Handles the host's job changes by updating the host's status
def recalculate
add_configuration || change_deployed_config
end
# Fetches an info message concerning the host's deploy status
def display_message
if !verified?
{ message: 'Your host needs to be verified by an admin', severity: :alert }
elsif pending?
{ message: 'client not configured yet', severity: :alert }
elsif configured? || dispatched?
{ message: 'client not deployed to Bacula', severity: :alert }
elsif updated? || redispatched?
{ message: 'client configuration changed, deploy needed', severity: :alert }
elsif for_removal?
{ message: 'pending client configuration withdraw', severity: :error }
elsif inactive?
{ message: 'client disabled', severity: :alert }
elsif blocked?
{ message: 'client disabled by admin.', severity: :error }
end
end
# Determines if a host can issue a restore job.
#
# @returns [Boolean] true if the host's client can issue a restore job
def restorable?
client.present? && client.is_backed_up?
end
# @return [User] the first of the host's users
def first_user
users.order('ownerships.created_at asc').first
end
# Marks the host as verified and sets the relevant metadata
#
# @param admin_verifier[Integer] the verifier's id
def verify(admin_verifier)
self.verified = true
self.verifier_id = admin_verifier
self.verified_at = Time.now
recipients = users.pluck(:email)
if save
UserMailer.notify_for_verification(recipients, name).deliver if recipients.any?
return true
end
false
end
# Determines if a host can be disabled or not.
# Equivalent to is_deployed
#
# @return [Boolean]
def can_be_disabled?
dispatched? || deployed? || updated? || redispatched?
end
# Determines if a host is inserted manually from the user or
# provided as an option from a list by the system via a third party
# like ViMa or Okeanos
#
# @return [Boolean]
def manually_inserted?
institutional?
end
private
# automatic setters
def sanitize_name
self.name = fqdn
end
# Sets the file and job retention according to the global settings
def set_retention
self.file_retention = client_settings[:file_retention]
self.file_retention_period_type = client_settings[:file_retention_period_type]
self.job_retention = client_settings[:job_retention]
self.job_retention_period_type = client_settings[:job_retention_period_type]
end
def unset_baculized
self.baculized = false if new_record?
true
end
+ def sanitize_email_recipients
+ self.email_recipients.reject!(&:blank?)
+ end
+
# validation
def fqdn_format
regex = /(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?
<% if current_user.needs_host_list? %>
<%= f.select :fqdn, options_for_select(@hosts_of_user, @host.fqdn), {},
disabled: @host.persisted? %>
<% else %>
<%= f.text_field :fqdn, disabled: @host.persisted? %>
<% end %>
- <%= f.password_field :password %>
+ <%= f.password_field :password, required: true %>
<%= f.number_field :port, min: 1 %>
+ <% emails = (@host.users.pluck(:email) + @host.email_recipients).uniq.select(&:present?) %>
+ <%= f.select :email_recipients, options_for_select(emails, @host.email_recipients), {},
+ multiple: true %>
-
<% end %>
diff --git a/db/migrate/20160131180912_add_mail_recipients_to_host.rb b/db/migrate/20160131180912_add_mail_recipients_to_host.rb
new file mode 100644
index 0000000..ca66f88
--- /dev/null
+++ b/db/migrate/20160131180912_add_mail_recipients_to_host.rb
@@ -0,0 +1,9 @@
+class AddMailRecipientsToHost < ActiveRecord::Migration
+ def up
+ add_column :hosts, :email_recipients, :string, default: [].to_json
+ end
+
+ def down
+ remove_column :hosts, :email_recipients
+ end
+end
diff --git a/db/schema.rb b/db/schema.rb
index 865db40..2c412bd 100644
--- a/db/schema.rb
+++ b/db/schema.rb
@@ -1,440 +1,441 @@
# encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
-ActiveRecord::Schema.define(version: 20160131105935) do
+ActiveRecord::Schema.define(version: 20160131180912) do
create_table "BaseFiles", primary_key: "BaseId", force: true do |t|
t.integer "BaseJobId", null: false
t.integer "JobId", null: false
t.integer "FileId", limit: 8, null: false
t.integer "FileIndex"
end
add_index "BaseFiles", ["JobId"], name: "basefiles_jobid_idx", using: :btree
create_table "CDImages", primary_key: "MediaId", force: true do |t|
t.datetime "LastBurn", null: false
end
create_table "Client", primary_key: "ClientId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.binary "Uname", limit: 255, null: false
t.integer "AutoPrune", limit: 1, default: 0
t.integer "FileRetention", limit: 8, default: 0
t.integer "JobRetention", limit: 8, default: 0
end
add_index "Client", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "Counters", id: false, force: true do |t|
t.binary "Counter", limit: 255, null: false
t.integer "MinValue", default: 0
t.integer "MaxValue", default: 0
t.integer "CurrentValue", default: 0
t.binary "WrapCounter", limit: 255, null: false
end
create_table "Device", primary_key: "DeviceId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "StorageId", default: 0
t.integer "DevMounts", default: 0
t.integer "DevReadBytes", limit: 8, default: 0
t.integer "DevWriteBytes", limit: 8, default: 0
t.integer "DevReadBytesSinceCleaning", limit: 8, default: 0
t.integer "DevWriteBytesSinceCleaning", limit: 8, default: 0
t.integer "DevReadTime", limit: 8, default: 0
t.integer "DevWriteTime", limit: 8, default: 0
t.integer "DevReadTimeSinceCleaning", limit: 8, default: 0
t.integer "DevWriteTimeSinceCleaning", limit: 8, default: 0
t.datetime "CleaningDate"
t.integer "CleaningPeriod", limit: 8, default: 0
end
create_table "File", primary_key: "FileId", force: true do |t|
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
t.integer "DeltaSeq", limit: 2, default: 0
t.integer "MarkId", default: 0
t.binary "LStat", limit: 255, null: false
t.binary "MD5", limit: 255
end
add_index "File", ["JobId", "PathId", "FilenameId"], name: "JobId_2", using: :btree
add_index "File", ["JobId"], name: "JobId", using: :btree
create_table "FileSet", primary_key: "FileSetId", force: true do |t|
t.binary "FileSet", limit: 255, null: false
t.binary "MD5", limit: 255
t.datetime "CreateTime"
end
create_table "Filename", primary_key: "FilenameId", force: true do |t|
t.binary "Name", null: false
end
add_index "Filename", ["Name"], name: "Name", length: {"Name"=>255}, using: :btree
create_table "Job", primary_key: "JobId", force: true do |t|
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "Job", ["Name"], name: "Name", length: {"Name"=>128}, using: :btree
create_table "JobHisto", id: false, force: true do |t|
t.integer "JobId", null: false
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "JobHisto", ["JobId"], name: "JobId", using: :btree
add_index "JobHisto", ["StartTime"], name: "StartTime", using: :btree
create_table "JobMedia", primary_key: "JobMediaId", force: true do |t|
t.integer "JobId", null: false
t.integer "MediaId", null: false
t.integer "FirstIndex", default: 0
t.integer "LastIndex", default: 0
t.integer "StartFile", default: 0
t.integer "EndFile", default: 0
t.integer "StartBlock", default: 0
t.integer "EndBlock", default: 0
t.integer "VolIndex", default: 0
end
add_index "JobMedia", ["JobId", "MediaId"], name: "JobId", using: :btree
create_table "Location", primary_key: "LocationId", force: true do |t|
t.binary "Location", limit: 255, null: false
t.integer "Cost", default: 0
t.integer "Enabled", limit: 1
end
create_table "LocationLog", primary_key: "LocLogId", force: true do |t|
t.datetime "Date"
t.binary "Comment", null: false
t.integer "MediaId", default: 0
t.integer "LocationId", default: 0
t.string "NewVolStatus", limit: 9, null: false
t.integer "NewEnabled", limit: 1
end
create_table "Log", primary_key: "LogId", force: true do |t|
t.integer "JobId", default: 0
t.datetime "Time"
t.binary "LogText", null: false
end
add_index "Log", ["JobId"], name: "JobId", using: :btree
create_table "Media", primary_key: "MediaId", force: true do |t|
t.binary "VolumeName", limit: 255, null: false
t.integer "Slot", default: 0
t.integer "PoolId", default: 0
t.binary "MediaType", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "LabelType", limit: 1, default: 0
t.datetime "FirstWritten"
t.datetime "LastWritten"
t.datetime "LabelDate"
t.integer "VolJobs", default: 0
t.integer "VolFiles", default: 0
t.integer "VolBlocks", default: 0
t.integer "VolMounts", default: 0
t.integer "VolBytes", limit: 8, default: 0
t.integer "VolParts", default: 0
t.integer "VolErrors", default: 0
t.integer "VolWrites", default: 0
t.integer "VolCapacityBytes", limit: 8, default: 0
t.string "VolStatus", limit: 9, null: false
t.integer "Enabled", limit: 1, default: 1
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "InChanger", limit: 1, default: 0
t.integer "StorageId", default: 0
t.integer "DeviceId", default: 0
t.integer "MediaAddressing", limit: 1, default: 0
t.integer "VolReadTime", limit: 8, default: 0
t.integer "VolWriteTime", limit: 8, default: 0
t.integer "EndFile", default: 0
t.integer "EndBlock", default: 0
t.integer "LocationId", default: 0
t.integer "RecycleCount", default: 0
t.datetime "InitialWrite"
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.binary "Comment"
end
add_index "Media", ["PoolId"], name: "PoolId", using: :btree
add_index "Media", ["VolumeName"], name: "VolumeName", unique: true, length: {"VolumeName"=>128}, using: :btree
create_table "MediaType", primary_key: "MediaTypeId", force: true do |t|
t.binary "MediaType", limit: 255, null: false
t.integer "ReadOnly", limit: 1, default: 0
end
create_table "Path", primary_key: "PathId", force: true do |t|
t.binary "Path", null: false
end
add_index "Path", ["Path"], name: "Path", length: {"Path"=>255}, using: :btree
create_table "PathHierarchy", primary_key: "PathId", force: true do |t|
t.integer "PPathId", null: false
end
add_index "PathHierarchy", ["PPathId"], name: "pathhierarchy_ppathid", using: :btree
create_table "PathVisibility", id: false, force: true do |t|
t.integer "PathId", null: false
t.integer "JobId", null: false
t.integer "Size", limit: 8, default: 0
t.integer "Files", default: 0
end
add_index "PathVisibility", ["JobId"], name: "pathvisibility_jobid", using: :btree
create_table "Pool", primary_key: "PoolId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "NumVols", default: 0
t.integer "MaxVols", default: 0
t.integer "UseOnce", limit: 1, default: 0
t.integer "UseCatalog", limit: 1, default: 0
t.integer "AcceptAnyVolume", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "AutoPrune", limit: 1, default: 0
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.string "PoolType", limit: 9, null: false
t.integer "LabelType", limit: 1, default: 0
t.binary "LabelFormat", limit: 255
t.integer "Enabled", limit: 1, default: 1
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.integer "NextPoolId", default: 0
t.integer "MigrationHighBytes", limit: 8, default: 0
t.integer "MigrationLowBytes", limit: 8, default: 0
t.integer "MigrationTime", limit: 8, default: 0
end
add_index "Pool", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "RestoreObject", primary_key: "RestoreObjectId", force: true do |t|
t.binary "ObjectName", null: false
t.binary "RestoreObject", limit: 2147483647, null: false
t.binary "PluginName", limit: 255, null: false
t.integer "ObjectLength", default: 0
t.integer "ObjectFullLength", default: 0
t.integer "ObjectIndex", default: 0
t.integer "ObjectType", default: 0
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "ObjectCompression", default: 0
end
add_index "RestoreObject", ["JobId"], name: "JobId", using: :btree
create_table "Status", primary_key: "JobStatus", force: true do |t|
t.binary "JobStatusLong"
t.integer "Severity"
end
create_table "Storage", primary_key: "StorageId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "AutoChanger", limit: 1, default: 0
end
create_table "UnsavedFiles", primary_key: "UnsavedId", force: true do |t|
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
end
create_table "Version", id: false, force: true do |t|
t.integer "VersionId", null: false
end
create_table "configuration_settings", force: true do |t|
t.string "job", default: "{}"
t.string "client", default: "{}"
t.datetime "created_at"
t.datetime "updated_at"
t.string "pool", default: "{}"
end
create_table "filesets", force: true do |t|
t.string "name"
t.integer "host_id"
t.text "exclude_directions"
t.text "include_directions"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "filesets", ["host_id"], name: "index_filesets_on_host_id", using: :btree
create_table "hosts", force: true do |t|
t.binary "name", limit: 255, null: false
t.binary "fqdn", limit: 255, null: false
t.integer "port", null: false
t.integer "file_retention", null: false
t.integer "job_retention", null: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "password"
t.boolean "baculized", default: false, null: false
t.datetime "baculized_at"
t.integer "status", limit: 1, default: 0
t.integer "client_id"
t.boolean "verified", default: false
t.datetime "verified_at"
t.integer "verifier_id"
t.string "job_retention_period_type"
t.string "file_retention_period_type"
t.integer "origin", limit: 1
+ t.string "email_recipients", default: "[]"
end
add_index "hosts", ["name"], name: "index_hosts_on_name", unique: true, length: {"name"=>128}, using: :btree
create_table "invitations", force: true do |t|
t.integer "user_id"
t.integer "host_id"
t.string "verification_code"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "invitations", ["user_id", "verification_code"], name: "index_invitations_on_user_id_and_verification_code", using: :btree
create_table "job_templates", force: true do |t|
t.string "name", null: false
t.integer "job_type", limit: 1
t.integer "host_id"
t.integer "fileset_id"
t.integer "schedule_id"
t.datetime "created_at"
t.datetime "updated_at"
t.boolean "enabled", default: false
t.binary "restore_location"
t.boolean "baculized", default: false
t.datetime "baculized_at"
t.string "client_before_run_file"
t.string "client_after_run_file"
end
create_table "ownerships", force: true do |t|
t.integer "user_id"
t.integer "host_id"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "schedule_runs", force: true do |t|
t.integer "schedule_id"
t.integer "level", limit: 1
t.string "month"
t.string "day"
t.string "time"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "schedule_runs", ["schedule_id"], name: "index_schedule_runs_on_schedule_id", using: :btree
create_table "schedules", force: true do |t|
t.string "name"
t.string "runs"
t.integer "host_id"
end
add_index "schedules", ["host_id"], name: "index_schedules_on_host_id", using: :btree
create_table "users", force: true do |t|
t.string "username", null: false
t.string "email"
t.integer "user_type", limit: 1, null: false
t.boolean "enabled", default: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "identifier"
t.string "password_hash"
t.datetime "login_at"
t.datetime "hosts_updated_at"
t.string "temp_hosts", default: "[]"
end
add_index "users", ["identifier"], name: "index_users_on_identifier", using: :btree
add_index "users", ["password_hash"], name: "index_users_on_password_hash", using: :btree
end