Page Menu
Home
GRNET
Search
Configure Global Search
Log In
Files
F904883
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Flag For Later
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Fri, Aug 29, 4:28 PM
Size
32 KB
Mime Type
text/x-diff
Expires
Sun, Aug 31, 4:28 PM (1 d, 3 h)
Engine
blob
Format
Raw Data
Handle
252300
Attached To
rARCHIVING archiving
View Options
diff --git a/app/models/host.rb b/app/models/host.rb
index cf6c7dc..0f6a8a5 100644
--- a/app/models/host.rb
+++ b/app/models/host.rb
@@ -1,297 +1,298 @@
# The bacula database must be independent from all of our application logic.
# For this reason we have Host which is the application equivalent of a Bacula Client.
#
# A host is being created from our application. When it receives all the configuration
# which is required it gets dispatched to bacula through some configuration files. After
# that, a client with the exact same config is generated by bacula.
class Host < ActiveRecord::Base
include Configuration::Host
STATUSES = {
pending: 0,
configured: 1,
dispatched: 2,
deployed: 3,
updated: 4,
redispatched: 5,
for_removal: 6,
inactive: 7,
blocked: 8
}
enum origin: { institutional: 0, vima: 1, okeanos: 2 }
has_many :ownerships
has_many :users, through: :ownerships, inverse_of: :hosts
+ has_many :invitations
belongs_to :client, class_name: :Client, foreign_key: :name, primary_key: :name
belongs_to :verifier, class_name: :User, foreign_key: :verifier_id, primary_key: :id
has_many :filesets, dependent: :destroy
has_many :job_templates, dependent: :destroy
has_many :schedules, dependent: :destroy
validates :file_retention, :job_retention,
:port, :password, presence: true
validates :port, numericality: true
validates :fqdn, presence: true, uniqueness: true
validate :fqdn_format
scope :not_baculized, -> {
joins("left join Client on Client.Name = hosts.name").where(Client: { Name: nil })
}
scope :in_bacula, -> {
where(
status: STATUSES.select { |k,_|
[:deployed, :updated, :redispatched, :for_removal].include? k
}.values
)
}
scope :unverified, -> { where(verified: false) }
before_validation :set_retention, :unset_baculized, :sanitize_name
state_machine :status, initial: :pending do
STATUSES.each do |status_name, value|
state status_name, value: value
end
after_transition [:dispatched, :redispatched, :configured, :updated] => :deployed do |host|
host.job_templates.enabled.
update_all(baculized: true, baculized_at: Time.now, updated_at: Time.now)
end
event :add_configuration do
transition [:pending, :dispatched, :inactive] => :configured
end
event :dispatch do
transition :configured => :dispatched
end
event :redispatch do
transition :updated => :redispatched
end
event :set_deployed do
transition [:dispatched, :redispatched, :configured, :updated] => :deployed
end
event :change_deployed_config do
transition [:deployed, :redispatched, :for_removal] => :updated
end
event :mark_for_removal do
transition [:dispatched, :deployed, :updated, :redispatched] => :for_removal
end
event :set_inactive do
transition [:deployed, :dispatched, :updated, :redispatched] => :inactive
end
event :disable do
transition all => :pending
end
event :block do
transition all - [:blocked] => :blocked
end
event :unblock do
transition :blocked => :pending
end
end
# Determines if a host has enabled jobs in order to be dispatched to Bacula
#
# @return [Boolean]
def bacula_ready?
job_templates.enabled.any?
end
# Shows the host's auto_prune setting
def auto_prune_human
client_settings[:autoprune]
end
# Uploads the host's config to bacula
# Reloads bacula server
#
# It updates the host's status accordingly
def dispatch_to_bacula
return false if not needs_dispatch?
bacula_handler.deploy_config
end
# Removes a Host from bacula configuration.
# Reloads bacula server
#
# If all go well it changes the host's status and returns true
#
# @param force[Boolean] forces removal
def remove_from_bacula(force=false)
return false if not (force || needs_revoke?)
bacula_handler.undeploy_config
end
# Restores a host's backup to a preselected location
#
# @param fileset_id[Integer] the desired fileset
# @param location[String] the desired restore location
# @param restore_point[Datetime] the desired restore_point datetime
def restore(file_set_id, location, restore_point=nil)
return false if not restorable?
job_ids = client.get_job_ids(file_set_id, restore_point)
file_set_name = FileSet.find(file_set_id).file_set
bacula_handler.restore(job_ids, file_set_name, restore_point, location)
end
# Runs the given backup job ASAP
def backup_now(job_name)
bacula_handler.backup_now(job_name)
end
# Disables all jobs and sends the configuration to Bacula
def disable_jobs_and_update
job_templates.update_all(enabled: false)
bacula_handler.deploy_config
end
# Disables all jobs if needed and then locks the host
def disable_jobs_and_lock
return false if can_set_inactive? && !disable_jobs_and_update
block
end
# Determines whether a host:
#
# * has all it takes to be deployed but
# * the config is not yet sent to bacula
#
# @return [Boolean]
def needs_dispatch?
verified? && (can_dispatch? || can_redispatch?)
end
# Determines whether a host is marked for removal
#
# @return [Boolean]
def needs_revoke?
for_removal?
end
# Handles the host's job changes by updating the host's status
def recalculate
add_configuration || change_deployed_config
end
# Fetches an info message concerning the host's deploy status
def display_message
if !verified?
{ message: 'Your host needs to be verified by an admin', severity: :alert }
elsif pending?
{ message: 'client not configured yet', severity: :alert }
elsif configured? || dispatched?
{ message: 'client not deployed to Bacula', severity: :alert }
elsif updated? || redispatched?
{ message: 'client configuration changed, deploy needed', severity: :alert }
elsif for_removal?
{ message: 'pending client configuration withdraw', severity: :error }
elsif inactive?
{ message: 'client disabled', severity: :alert }
elsif blocked?
{ message: 'client disabled by admin.', severity: :error }
end
end
# Determines if a host can issue a restore job.
#
# @return [Boolean] true if the host's client can issue a restore job
def restorable?
client.present? && client.is_backed_up?
end
# @return [User] the first of the host's users
def first_user
users.order('ownerships.created_at asc').first
end
# Marks the host as verified and sets the relevant metadata
#
# @param admin_verifier[Integer] the verifier's id
def verify(admin_verifier)
self.verified = true
self.verifier_id = admin_verifier
self.verified_at = Time.now
recipients = users.pluck(:email)
if save
UserMailer.notify_for_verification(recipients, name).deliver if recipients.any?
return true
end
false
end
# Determines if a host can be disabled or not.
# Equivalent to is_deployed
#
# @return [Boolean]
def can_be_disabled?
dispatched? || deployed? || updated? || redispatched?
end
# Determines if a host is inserted manually from the user or
# provided as an option from a list by the system via a third party
# like ViMa or Okeanos
#
# @return [Boolean]
def manually_inserted?
institutional?
end
private
# automatic setters
def sanitize_name
self.name = fqdn
end
# Sets the file and job retention according to the global settings
def set_retention
self.file_retention = client_settings[:file_retention]
self.file_retention_period_type = client_settings[:file_retention_period_type]
self.job_retention = client_settings[:job_retention]
self.job_retention_period_type = client_settings[:job_retention_period_type]
end
def unset_baculized
self.baculized = false if new_record?
true
end
# validation
def fqdn_format
regex = /(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)/
unless fqdn =~ regex
self.errors.add(:fqdn)
end
end
# Proxy object for handling bacula directives
def bacula_handler
BaculaHandler.new(self)
end
# Fetches and memoizes the general configuration settings for Clients
#
# @see ConfigurationSetting.current_client_settings
# @return [Hash] containing the settings
def client_settings
@client_settings ||= ConfigurationSetting.current_client_settings
end
end
diff --git a/app/models/invitation.rb b/app/models/invitation.rb
new file mode 100644
index 0000000..344cb78
--- /dev/null
+++ b/app/models/invitation.rb
@@ -0,0 +1,17 @@
+# `Invitation` describes the pending invitation of a user to join a host.
+class Invitation < ActiveRecord::Base
+ belongs_to :user
+ belongs_to :host
+
+ validates :user, :host, :verification_code, presence: true
+
+ before_validation :calculate_verification_code
+
+ private
+
+ def calculate_verification_code
+ self.verification_code = Digest::SHA256.hexdigest(
+ [host.name, Time.now.to_s, Rails.application.secrets.salt].join
+ )
+ end
+end
diff --git a/app/models/user.rb b/app/models/user.rb
index 614d625..ca44ac2 100644
--- a/app/models/user.rb
+++ b/app/models/user.rb
@@ -1,109 +1,110 @@
class User < ActiveRecord::Base
attr_accessor :password, :retype_password
has_many :ownerships
has_many :hosts, through: :ownerships, inverse_of: :users
+ has_many :invitations
enum user_type: { institutional: 0, vima: 1, okeanos: 2, admin: 3 }
validates :user_type, presence: true
validates :username, presence: true, uniqueness: { scope: :user_type }
validates :email, presence: true, uniqueness: { scope: :user_type }
before_create :confirm_passwords, if: :admin?
# Returns an admin user with the given password
#
# @param username[String] username from user input
# @param a_password[String] password from user input
#
# @return [User] the admin user or nil
def self.fetch_admin_with_password(username, a_password)
hashed_pass = Digest::SHA256.hexdigest(a_password + Rails.application.secrets.salt)
admin = User.admin.find_by_username_and_password_hash(username, hashed_pass)
admin
end
# Composes the user's display name from the user's username and email
#
# @return [String]
def display_name
"#{username} <#{email}>"
end
# Determines if the user must select hosts from a list or enter their
# FQDN manually
#
# @return [Boolean]
def needs_host_list?
vima? || okeanos?
end
# Determines if the user is editable or not.
# Editable users are only admin users, all others come from 3rd party authorization
#
# @return [Boolean]
def editable?
admin?
end
# Marks a user as not enabled
def ban
self.enabled = false
save
end
# Marks a user as enabled
def unban
self.enabled = true
save
end
# Stores a hashed password as a password_hash
#
# @param a_password[String] the user submitted password
#
# @return [Boolean] the save exit status
def add_password(a_password)
self.password_hash = Digest::SHA256.hexdigest(a_password + Rails.application.secrets.salt)
self.save
end
# Fetches the user's unverified hosts
#
# @return [Array] of Strings containing the hosts' names
def unverified_hosts
hosts.unverified.pluck(:name)
end
# Fetches the user's hosts that are being backed up by bacula
#
# @return [Array] of Strings containing the hosts' names
def baculized_hosts
hosts.in_bacula.pluck(:name)
end
# Fetches the user's hosts that are NOT being backed up by bacula
#
# @return [Array] of Strings containing the hosts' names
def non_baculized_hosts
hosts.not_baculized.pluck(:name)
end
private
def confirm_passwords
if password.blank?
self.errors.add(:password, 'Must give a password')
return false
end
if password != retype_password
self.errors.add(:password, 'Passwords mismatch')
self.errors.add(:retype_password, 'Passwords mismatch')
return false
end
true
end
end
diff --git a/db/migrate/20160124191156_create_invitations.rb b/db/migrate/20160124191156_create_invitations.rb
new file mode 100644
index 0000000..d8f5e15
--- /dev/null
+++ b/db/migrate/20160124191156_create_invitations.rb
@@ -0,0 +1,16 @@
+class CreateInvitations < ActiveRecord::Migration
+ def up
+ create_table :invitations do |t|
+ t.references :user
+ t.references :host
+ t.string :verification_code
+ t.timestamps
+ end
+
+ add_index :invitations, [:user_id, :verification_code]
+ end
+
+ def down
+ drop_table :invitations
+ end
+end
diff --git a/db/schema.rb b/db/schema.rb
index 0c39d80..5bc6190 100644
--- a/db/schema.rb
+++ b/db/schema.rb
@@ -1,428 +1,438 @@
# encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
-ActiveRecord::Schema.define(version: 20160121222332) do
+ActiveRecord::Schema.define(version: 20160124191156) do
create_table "BaseFiles", primary_key: "BaseId", force: true do |t|
t.integer "BaseJobId", null: false
t.integer "JobId", null: false
t.integer "FileId", limit: 8, null: false
t.integer "FileIndex"
end
add_index "BaseFiles", ["JobId"], name: "basefiles_jobid_idx", using: :btree
create_table "CDImages", primary_key: "MediaId", force: true do |t|
t.datetime "LastBurn", null: false
end
create_table "Client", primary_key: "ClientId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.binary "Uname", limit: 255, null: false
t.integer "AutoPrune", limit: 1, default: 0
t.integer "FileRetention", limit: 8, default: 0
t.integer "JobRetention", limit: 8, default: 0
end
add_index "Client", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "Counters", id: false, force: true do |t|
t.binary "Counter", limit: 255, null: false
t.integer "MinValue", default: 0
t.integer "MaxValue", default: 0
t.integer "CurrentValue", default: 0
t.binary "WrapCounter", limit: 255, null: false
end
create_table "Device", primary_key: "DeviceId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "StorageId", default: 0
t.integer "DevMounts", default: 0
t.integer "DevReadBytes", limit: 8, default: 0
t.integer "DevWriteBytes", limit: 8, default: 0
t.integer "DevReadBytesSinceCleaning", limit: 8, default: 0
t.integer "DevWriteBytesSinceCleaning", limit: 8, default: 0
t.integer "DevReadTime", limit: 8, default: 0
t.integer "DevWriteTime", limit: 8, default: 0
t.integer "DevReadTimeSinceCleaning", limit: 8, default: 0
t.integer "DevWriteTimeSinceCleaning", limit: 8, default: 0
t.datetime "CleaningDate"
t.integer "CleaningPeriod", limit: 8, default: 0
end
create_table "File", primary_key: "FileId", force: true do |t|
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
t.integer "DeltaSeq", limit: 2, default: 0
t.integer "MarkId", default: 0
t.binary "LStat", limit: 255, null: false
t.binary "MD5", limit: 255
end
add_index "File", ["JobId", "PathId", "FilenameId"], name: "JobId_2", using: :btree
add_index "File", ["JobId"], name: "JobId", using: :btree
create_table "FileSet", primary_key: "FileSetId", force: true do |t|
t.binary "FileSet", limit: 255, null: false
t.binary "MD5", limit: 255
t.datetime "CreateTime"
end
create_table "Filename", primary_key: "FilenameId", force: true do |t|
t.binary "Name", null: false
end
add_index "Filename", ["Name"], name: "Name", length: {"Name"=>255}, using: :btree
create_table "Job", primary_key: "JobId", force: true do |t|
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "Job", ["Name"], name: "Name", length: {"Name"=>128}, using: :btree
create_table "JobHisto", id: false, force: true do |t|
t.integer "JobId", null: false
t.binary "Job", limit: 255, null: false
t.binary "Name", limit: 255, null: false
t.binary "Type", limit: 1, null: false
t.binary "Level", limit: 1, null: false
t.integer "ClientId", default: 0
t.binary "JobStatus", limit: 1, null: false
t.datetime "SchedTime"
t.datetime "StartTime"
t.datetime "EndTime"
t.datetime "RealEndTime"
t.integer "JobTDate", limit: 8, default: 0
t.integer "VolSessionId", default: 0
t.integer "VolSessionTime", default: 0
t.integer "JobFiles", default: 0
t.integer "JobBytes", limit: 8, default: 0
t.integer "ReadBytes", limit: 8, default: 0
t.integer "JobErrors", default: 0
t.integer "JobMissingFiles", default: 0
t.integer "PoolId", default: 0
t.integer "FileSetId", default: 0
t.integer "PriorJobId", default: 0
t.integer "PurgedFiles", limit: 1, default: 0
t.integer "HasBase", limit: 1, default: 0
t.integer "HasCache", limit: 1, default: 0
t.integer "Reviewed", limit: 1, default: 0
t.binary "Comment"
end
add_index "JobHisto", ["JobId"], name: "JobId", using: :btree
add_index "JobHisto", ["StartTime"], name: "StartTime", using: :btree
create_table "JobMedia", primary_key: "JobMediaId", force: true do |t|
t.integer "JobId", null: false
t.integer "MediaId", null: false
t.integer "FirstIndex", default: 0
t.integer "LastIndex", default: 0
t.integer "StartFile", default: 0
t.integer "EndFile", default: 0
t.integer "StartBlock", default: 0
t.integer "EndBlock", default: 0
t.integer "VolIndex", default: 0
end
add_index "JobMedia", ["JobId", "MediaId"], name: "JobId", using: :btree
create_table "Location", primary_key: "LocationId", force: true do |t|
t.binary "Location", limit: 255, null: false
t.integer "Cost", default: 0
t.integer "Enabled", limit: 1
end
create_table "LocationLog", primary_key: "LocLogId", force: true do |t|
t.datetime "Date"
t.binary "Comment", null: false
t.integer "MediaId", default: 0
t.integer "LocationId", default: 0
t.string "NewVolStatus", limit: 9, null: false
t.integer "NewEnabled", limit: 1
end
create_table "Log", primary_key: "LogId", force: true do |t|
t.integer "JobId", default: 0
t.datetime "Time"
t.binary "LogText", null: false
end
add_index "Log", ["JobId"], name: "JobId", using: :btree
create_table "Media", primary_key: "MediaId", force: true do |t|
t.binary "VolumeName", limit: 255, null: false
t.integer "Slot", default: 0
t.integer "PoolId", default: 0
t.binary "MediaType", limit: 255, null: false
t.integer "MediaTypeId", default: 0
t.integer "LabelType", limit: 1, default: 0
t.datetime "FirstWritten"
t.datetime "LastWritten"
t.datetime "LabelDate"
t.integer "VolJobs", default: 0
t.integer "VolFiles", default: 0
t.integer "VolBlocks", default: 0
t.integer "VolMounts", default: 0
t.integer "VolBytes", limit: 8, default: 0
t.integer "VolParts", default: 0
t.integer "VolErrors", default: 0
t.integer "VolWrites", default: 0
t.integer "VolCapacityBytes", limit: 8, default: 0
t.string "VolStatus", limit: 9, null: false
t.integer "Enabled", limit: 1, default: 1
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "InChanger", limit: 1, default: 0
t.integer "StorageId", default: 0
t.integer "DeviceId", default: 0
t.integer "MediaAddressing", limit: 1, default: 0
t.integer "VolReadTime", limit: 8, default: 0
t.integer "VolWriteTime", limit: 8, default: 0
t.integer "EndFile", default: 0
t.integer "EndBlock", default: 0
t.integer "LocationId", default: 0
t.integer "RecycleCount", default: 0
t.datetime "InitialWrite"
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.binary "Comment"
end
add_index "Media", ["PoolId"], name: "PoolId", using: :btree
add_index "Media", ["VolumeName"], name: "VolumeName", unique: true, length: {"VolumeName"=>128}, using: :btree
create_table "MediaType", primary_key: "MediaTypeId", force: true do |t|
t.binary "MediaType", limit: 255, null: false
t.integer "ReadOnly", limit: 1, default: 0
end
create_table "Path", primary_key: "PathId", force: true do |t|
t.binary "Path", null: false
end
add_index "Path", ["Path"], name: "Path", length: {"Path"=>255}, using: :btree
create_table "PathHierarchy", primary_key: "PathId", force: true do |t|
t.integer "PPathId", null: false
end
add_index "PathHierarchy", ["PPathId"], name: "pathhierarchy_ppathid", using: :btree
create_table "PathVisibility", id: false, force: true do |t|
t.integer "PathId", null: false
t.integer "JobId", null: false
t.integer "Size", limit: 8, default: 0
t.integer "Files", default: 0
end
add_index "PathVisibility", ["JobId"], name: "pathvisibility_jobid", using: :btree
create_table "Pool", primary_key: "PoolId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "NumVols", default: 0
t.integer "MaxVols", default: 0
t.integer "UseOnce", limit: 1, default: 0
t.integer "UseCatalog", limit: 1, default: 0
t.integer "AcceptAnyVolume", limit: 1, default: 0
t.integer "VolRetention", limit: 8, default: 0
t.integer "VolUseDuration", limit: 8, default: 0
t.integer "MaxVolJobs", default: 0
t.integer "MaxVolFiles", default: 0
t.integer "MaxVolBytes", limit: 8, default: 0
t.integer "AutoPrune", limit: 1, default: 0
t.integer "Recycle", limit: 1, default: 0
t.integer "ActionOnPurge", limit: 1, default: 0
t.string "PoolType", limit: 9, null: false
t.integer "LabelType", limit: 1, default: 0
t.binary "LabelFormat", limit: 255
t.integer "Enabled", limit: 1, default: 1
t.integer "ScratchPoolId", default: 0
t.integer "RecyclePoolId", default: 0
t.integer "NextPoolId", default: 0
t.integer "MigrationHighBytes", limit: 8, default: 0
t.integer "MigrationLowBytes", limit: 8, default: 0
t.integer "MigrationTime", limit: 8, default: 0
end
add_index "Pool", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
create_table "RestoreObject", primary_key: "RestoreObjectId", force: true do |t|
t.binary "ObjectName", null: false
t.binary "RestoreObject", limit: 2147483647, null: false
t.binary "PluginName", limit: 255, null: false
t.integer "ObjectLength", default: 0
t.integer "ObjectFullLength", default: 0
t.integer "ObjectIndex", default: 0
t.integer "ObjectType", default: 0
t.integer "FileIndex", default: 0
t.integer "JobId", null: false
t.integer "ObjectCompression", default: 0
end
add_index "RestoreObject", ["JobId"], name: "JobId", using: :btree
create_table "Status", primary_key: "JobStatus", force: true do |t|
t.binary "JobStatusLong"
t.integer "Severity"
end
create_table "Storage", primary_key: "StorageId", force: true do |t|
t.binary "Name", limit: 255, null: false
t.integer "AutoChanger", limit: 1, default: 0
end
create_table "UnsavedFiles", primary_key: "UnsavedId", force: true do |t|
t.integer "JobId", null: false
t.integer "PathId", null: false
t.integer "FilenameId", null: false
end
create_table "Version", id: false, force: true do |t|
t.integer "VersionId", null: false
end
create_table "configuration_settings", force: true do |t|
t.string "job", default: "{}"
t.string "client", default: "{}"
t.datetime "created_at"
t.datetime "updated_at"
t.string "pool", default: "{}"
end
create_table "filesets", force: true do |t|
t.string "name"
t.integer "host_id"
t.text "exclude_directions"
t.text "include_directions"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "filesets", ["host_id"], name: "index_filesets_on_host_id", using: :btree
create_table "hosts", force: true do |t|
t.binary "name", limit: 255, null: false
t.binary "fqdn", limit: 255, null: false
t.integer "port", null: false
t.integer "file_retention", null: false
t.integer "job_retention", null: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "password"
t.boolean "baculized", default: false, null: false
t.datetime "baculized_at"
t.integer "status", limit: 1, default: 0
t.integer "client_id"
t.boolean "verified", default: false
t.datetime "verified_at"
t.integer "verifier_id"
t.string "job_retention_period_type"
t.string "file_retention_period_type"
t.integer "origin", limit: 1
end
add_index "hosts", ["name"], name: "index_hosts_on_name", unique: true, length: {"name"=>128}, using: :btree
+ create_table "invitations", force: true do |t|
+ t.integer "user_id"
+ t.integer "host_id"
+ t.string "verification_code"
+ t.datetime "created_at"
+ t.datetime "updated_at"
+ end
+
+ add_index "invitations", ["user_id", "verification_code"], name: "index_invitations_on_user_id_and_verification_code", using: :btree
+
create_table "job_templates", force: true do |t|
t.string "name", null: false
t.integer "job_type", limit: 1
t.integer "host_id"
t.integer "fileset_id"
t.integer "schedule_id"
t.datetime "created_at"
t.datetime "updated_at"
t.boolean "enabled", default: false
t.binary "restore_location"
t.boolean "baculized", default: false
t.datetime "baculized_at"
t.string "client_before_run_file"
t.string "client_after_run_file"
end
create_table "ownerships", force: true do |t|
t.integer "user_id"
t.integer "host_id"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "schedule_runs", force: true do |t|
t.integer "schedule_id"
t.integer "level", limit: 1
t.string "month"
t.string "day"
t.string "time"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "schedule_runs", ["schedule_id"], name: "index_schedule_runs_on_schedule_id", using: :btree
create_table "schedules", force: true do |t|
t.string "name"
t.string "runs"
t.integer "host_id"
end
add_index "schedules", ["host_id"], name: "index_schedules_on_host_id", using: :btree
create_table "users", force: true do |t|
t.string "username", null: false
t.string "email"
t.integer "user_type", limit: 1, null: false
t.boolean "enabled", default: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "identifier"
t.string "password_hash"
t.datetime "login_at"
end
add_index "users", ["identifier"], name: "index_users_on_identifier", using: :btree
add_index "users", ["password_hash"], name: "index_users_on_password_hash", using: :btree
end
Event Timeline
Log In to Comment