diff --git a/app/models/host.rb b/app/models/host.rb
index 379ca21..5165174 100644
--- a/app/models/host.rb
+++ b/app/models/host.rb
@@ -1,160 +1,164 @@
 class Host < ActiveRecord::Base
   establish_connection Baas::settings[:local_db]
 
   FILE_RETENTION_DAYS = 60
   JOB_RETENTION_DAYS = 180
   CATALOG = 'MyCatalog'
   AUTOPRUNE = 1
 
   STATUSES = {
     pending: 0,
     configured: 1,
     dispatched: 2,
     deployed: 3,
     updated: 4,
     redispatched: 5,
     for_removal: 6
   }
 
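+  # Users who own this host, through the ownerships join table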
+  has_many :ownerships, dependent: :destroy
+  has_many :users, through: :ownerships
+
   belongs_to :client, class_name: :Client, foreign_key: :name, primary_key: :name
 
   has_many :filesets, dependent: :destroy
   has_many :job_templates, dependent: :destroy
   has_many :schedules, dependent: :destroy
 
   validates :file_retention, :job_retention,
     :port, :password, presence: true
   validates :port, numericality: true
 
   validates :fqdn, presence: true, uniqueness: true
 
   validate :fqdn_format
 
   scope :not_baculized, -> {
     joins("left join Client on Client.Name = hosts.name").where(Client: { Name: nil })
   }
 
   before_validation :set_retention, :unset_baculized, :sanitize_name
 
   state_machine :status, initial: :pending do
     STATUSES.each do |status_name, value|
       state status_name, value: value
     end
 
     event :add_configuration do
       transition [:pending, :dispatched] => :configured
     end
 
     event :dispatch do
       transition :configured => :dispatched
     end
 
     event :redispatch do
       transition :updated => :redispatched
     end
 
     event :set_deployed do
       transition [:dispatched, :redispatched, :configured, :updated] => :deployed
     end
 
     event :change_deployed_config do
       transition [:deployed, :redispatched, :for_removal] => :updated
     end
 
     event :mark_for_removal do
       transition [:dispatched, :deployed, :updated, :redispatched] => :for_removal
     end
 
     event :disable do
       transition all => :pending
     end
   end
 
   def baculize_config
     templates = job_templates.enabled.includes(:fileset, :schedule)
 
     result = [self] + templates.map {|x| [x, x.fileset, x.schedule] }.flatten.compact.uniq
     result.map(&:to_bacula_config_array)
   end
 
   def to_bacula_config_array
     [
       "Client {",
       "  Name = #{name}",
       "  Address = #{fqdn}",
       "  FDPort = #{port}",
       "  Catalog = #{CATALOG}",
       "  Password = \"#{password}\"",
       "  File Retention = #{file_retention} days",
       "  Job Retention = #{job_retention} days",
       "  AutoPrune = yes",
       "}"
     ]
   end
 
   def auto_prune_human
     AUTOPRUNE == 1 ? 'yes' : 'no'
   end
 
   # Uploads the host's config to bacula
   # Reloads bacula server
   #
   # It updates the host's status accordingly
   def dispatch_to_bacula
     return false if not needs_dispatch?
     BaculaHandler.new(self).deploy_config
   end
 
   # Removes a Host from bacula configuration.
   # Reloads bacula server
   #
   # If all go well it changes the host's status and returns true
   def remove_from_bacula
     return false unless needs_revoke?
     BaculaHandler.new(self).undeploy_config
   end
 
   def needs_dispatch?
     verified? && (can_dispatch? || can_redispatch?)
   end
 
   def needs_revoke?
     for_removal?
   end
 
   # Handles the host's job changes by updating the host's status
   def recalculate
     if job_templates(true).enabled.any?
       add_configuration || change_deployed_config
     else
       mark_for_removal || disable
     end
   end
 
   private
 
   # automatic setters
 
   def sanitize_name
     self.name = fqdn
   end
 
   def set_retention
     self.file_retention = FILE_RETENTION_DAYS
     self.job_retention = JOB_RETENTION_DAYS
   end
 
   def unset_baculized
     self.baculized = false if new_record?
     true
   end
 
   # validation
 
   def fqdn_format
     regex = /(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)/
     unless fqdn =~ regex
       self.errors.add(:fqdn)
     end
   end
 end
diff --git a/app/models/ownership.rb b/app/models/ownership.rb
new file mode 100644
index 0000000..a6c25fe
--- /dev/null
+++ b/app/models/ownership.rb
@@ -0,0 +1,7 @@
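+# Join model connecting a User to a Host it owns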
+class Ownership < ActiveRecord::Base
+  establish_connection Baas::settings[:local_db]
+
+  belongs_to :user
+  belongs_to :host
+end
diff --git a/app/models/user.rb b/app/models/user.rb
index b08600d..841a3e3 100644
--- a/app/models/user.rb
+++ b/app/models/user.rb
@@ -1,7 +1,11 @@
 class User < ActiveRecord::Base
   establish_connection Baas::settings[:local_db]
 
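+  # Hosts owned by this user, through the ownerships join table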
+  has_many :ownerships, dependent: :destroy
+  has_many :hosts, through: :ownerships
+
   enum user_type: { institutional: 0, vima: 1, okeanos: 2, admin: 3 }
 
   validates :username, :user_type, presence: true
 end
diff --git a/db/migrate/20151112212531_create_ownerships.rb b/db/migrate/20151112212531_create_ownerships.rb
new file mode 100644
index 0000000..ff789ed
--- /dev/null
+++ b/db/migrate/20151112212531_create_ownerships.rb
@@ -0,0 +1,18 @@
+class CreateOwnerships < ActiveRecord::Migration
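+  # Run against the local application database (Baas::settings[:local_db]) rather than the default connection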
+  def connection
+    ActiveRecord::Base.establish_connection(Baas::settings[:local_db]).connection
+  end
+
+  def up
+    create_table :ownerships do |t|
+      t.integer :user_id, index: true
+      t.integer :host_id, index: true
+      t.timestamps
+    end
+  end
+
+  def down
+    drop_table :ownerships
+  end
+end
diff --git a/db/schema.rb b/db/schema.rb
index 1f3387e..8150c6a 100644
--- a/db/schema.rb
+++ b/db/schema.rb
@@ -1,386 +1,393 @@
 # encoding: UTF-8
 # This file is auto-generated from the current state of the database. Instead
 # of editing this file, please use the migrations feature of Active Record to
 # incrementally modify your database, and then regenerate this schema definition.
 #
 # Note that this schema.rb definition is the authoritative source for your
 # database schema. If you need to create the application database on another
 # system, you should be using db:schema:load, not running all the migrations
 # from scratch. The latter is a flawed and unsustainable approach (the more migrations
 # you'll amass, the slower it'll run and the greater likelihood for issues).
 #
 # It's strongly recommended that you check this file into your version control system.
 
-ActiveRecord::Schema.define(version: 20151106210604) do
+ActiveRecord::Schema.define(version: 20151112212531) do
 
   create_table "BaseFiles", primary_key: "BaseId", force: true do |t|
     t.integer "BaseJobId",           null: false
     t.integer "JobId",               null: false
     t.integer "FileId",    limit: 8, null: false
     t.integer "FileIndex"
   end
 
   add_index "BaseFiles", ["JobId"], name: "basefiles_jobid_idx", using: :btree
 
   create_table "CDImages", primary_key: "MediaId", force: true do |t|
     t.datetime "LastBurn", null: false
   end
 
   create_table "Client", primary_key: "ClientId", force: true do |t|
     t.binary  "Name",          limit: 255,             null: false
     t.binary  "Uname",         limit: 255,             null: false
     t.integer "AutoPrune",     limit: 1,   default: 0
     t.integer "FileRetention", limit: 8,   default: 0
     t.integer "JobRetention",  limit: 8,   default: 0
   end
 
   add_index "Client", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
 
   create_table "Counters", id: false, force: true do |t|
     t.binary  "Counter",      limit: 255,             null: false
     t.integer "MinValue",                 default: 0
     t.integer "MaxValue",                 default: 0
     t.integer "CurrentValue",             default: 0
     t.binary  "WrapCounter",  limit: 255,             null: false
   end
 
   create_table "Device", primary_key: "DeviceId", force: true do |t|
     t.binary   "Name",                       limit: 255,             null: false
     t.integer  "MediaTypeId",                            default: 0
     t.integer  "StorageId",                              default: 0
     t.integer  "DevMounts",                              default: 0
     t.integer  "DevReadBytes",               limit: 8,   default: 0
     t.integer  "DevWriteBytes",              limit: 8,   default: 0
     t.integer  "DevReadBytesSinceCleaning",  limit: 8,   default: 0
     t.integer  "DevWriteBytesSinceCleaning", limit: 8,   default: 0
     t.integer  "DevReadTime",                limit: 8,   default: 0
     t.integer  "DevWriteTime",               limit: 8,   default: 0
     t.integer  "DevReadTimeSinceCleaning",   limit: 8,   default: 0
     t.integer  "DevWriteTimeSinceCleaning",  limit: 8,   default: 0
     t.datetime "CleaningDate"
     t.integer  "CleaningPeriod",             limit: 8,   default: 0
   end
 
   create_table "File", primary_key: "FileId", force: true do |t|
     t.integer "FileIndex",              default: 0
     t.integer "JobId",                              null: false
     t.integer "PathId",                             null: false
     t.integer "FilenameId",                         null: false
     t.integer "DeltaSeq",   limit: 2,   default: 0
     t.integer "MarkId",                 default: 0
     t.binary  "LStat",      limit: 255,             null: false
     t.binary  "MD5",        limit: 255
   end
 
   add_index "File", ["JobId", "PathId", "FilenameId"], name: "JobId_2", using: :btree
   add_index "File", ["JobId"], name: "JobId", using: :btree
 
   create_table "FileSet", primary_key: "FileSetId", force: true do |t|
     t.binary   "FileSet",    limit: 255, null: false
     t.binary   "MD5",        limit: 255
     t.datetime "CreateTime"
   end
 
   create_table "Filename", primary_key: "FilenameId", force: true do |t|
     t.binary "Name", null: false
   end
 
   add_index "Filename", ["Name"], name: "Name", length: {"Name"=>255}, using: :btree
 
   create_table "Job", primary_key: "JobId", force: true do |t|
     t.binary   "Job",             limit: 255,             null: false
     t.binary   "Name",            limit: 255,             null: false
     t.binary   "Type",            limit: 1,               null: false
     t.binary   "Level",           limit: 1,               null: false
     t.integer  "ClientId",                    default: 0
     t.binary   "JobStatus",       limit: 1,               null: false
     t.datetime "SchedTime"
     t.datetime "StartTime"
     t.datetime "EndTime"
     t.datetime "RealEndTime"
     t.integer  "JobTDate",        limit: 8,   default: 0
     t.integer  "VolSessionId",                default: 0
     t.integer  "VolSessionTime",              default: 0
     t.integer  "JobFiles",                    default: 0
     t.integer  "JobBytes",        limit: 8,   default: 0
     t.integer  "ReadBytes",       limit: 8,   default: 0
     t.integer  "JobErrors",                   default: 0
     t.integer  "JobMissingFiles",             default: 0
     t.integer  "PoolId",                      default: 0
     t.integer  "FileSetId",                   default: 0
     t.integer  "PriorJobId",                  default: 0
     t.integer  "PurgedFiles",     limit: 1,   default: 0
     t.integer  "HasBase",         limit: 1,   default: 0
     t.integer  "HasCache",        limit: 1,   default: 0
     t.integer  "Reviewed",        limit: 1,   default: 0
     t.binary   "Comment"
   end
 
   add_index "Job", ["Name"], name: "Name", length: {"Name"=>128}, using: :btree
 
   create_table "JobHisto", id: false, force: true do |t|
     t.integer  "JobId",                                   null: false
     t.binary   "Job",             limit: 255,             null: false
     t.binary   "Name",            limit: 255,             null: false
     t.binary   "Type",            limit: 1,               null: false
     t.binary   "Level",           limit: 1,               null: false
     t.integer  "ClientId",                    default: 0
     t.binary   "JobStatus",       limit: 1,               null: false
     t.datetime "SchedTime"
     t.datetime "StartTime"
     t.datetime "EndTime"
     t.datetime "RealEndTime"
     t.integer  "JobTDate",        limit: 8,   default: 0
     t.integer  "VolSessionId",                default: 0
     t.integer  "VolSessionTime",              default: 0
     t.integer  "JobFiles",                    default: 0
     t.integer  "JobBytes",        limit: 8,   default: 0
     t.integer  "ReadBytes",       limit: 8,   default: 0
     t.integer  "JobErrors",                   default: 0
     t.integer  "JobMissingFiles",             default: 0
     t.integer  "PoolId",                      default: 0
     t.integer  "FileSetId",                   default: 0
     t.integer  "PriorJobId",                  default: 0
     t.integer  "PurgedFiles",     limit: 1,   default: 0
     t.integer  "HasBase",         limit: 1,   default: 0
     t.integer  "HasCache",        limit: 1,   default: 0
     t.integer  "Reviewed",        limit: 1,   default: 0
     t.binary   "Comment"
   end
 
   add_index "JobHisto", ["JobId"], name: "JobId", using: :btree
   add_index "JobHisto", ["StartTime"], name: "StartTime", using: :btree
 
   create_table "JobMedia", primary_key: "JobMediaId", force: true do |t|
     t.integer "JobId",                  null: false
     t.integer "MediaId",                null: false
     t.integer "FirstIndex", default: 0
     t.integer "LastIndex",  default: 0
     t.integer "StartFile",  default: 0
     t.integer "EndFile",    default: 0
     t.integer "StartBlock", default: 0
     t.integer "EndBlock",   default: 0
     t.integer "VolIndex",   default: 0
   end
 
   add_index "JobMedia", ["JobId", "MediaId"], name: "JobId", using: :btree
 
   create_table "Location", primary_key: "LocationId", force: true do |t|
     t.binary  "Location", limit: 255,             null: false
     t.integer "Cost",                 default: 0
     t.integer "Enabled",  limit: 1
   end
 
   create_table "LocationLog", primary_key: "LocLogId", force: true do |t|
     t.datetime "Date"
     t.binary   "Comment",                            null: false
     t.integer  "MediaId",                default: 0
     t.integer  "LocationId",             default: 0
     t.string   "NewVolStatus", limit: 9,             null: false
     t.integer  "NewEnabled",   limit: 1
   end
 
   create_table "Log", primary_key: "LogId", force: true do |t|
     t.integer  "JobId",   default: 0
     t.datetime "Time"
     t.binary   "LogText",             null: false
   end
 
   add_index "Log", ["JobId"], name: "JobId", using: :btree
 
   create_table "Media", primary_key: "MediaId", force: true do |t|
     t.binary   "VolumeName",       limit: 255,             null: false
     t.integer  "Slot",                         default: 0
     t.integer  "PoolId",                       default: 0
     t.binary   "MediaType",        limit: 255,             null: false
     t.integer  "MediaTypeId",                  default: 0
     t.integer  "LabelType",        limit: 1,   default: 0
     t.datetime "FirstWritten"
     t.datetime "LastWritten"
     t.datetime "LabelDate"
     t.integer  "VolJobs",                      default: 0
     t.integer  "VolFiles",                     default: 0
     t.integer  "VolBlocks",                    default: 0
     t.integer  "VolMounts",                    default: 0
     t.integer  "VolBytes",         limit: 8,   default: 0
     t.integer  "VolParts",                     default: 0
     t.integer  "VolErrors",                    default: 0
     t.integer  "VolWrites",                    default: 0
     t.integer  "VolCapacityBytes", limit: 8,   default: 0
     t.string   "VolStatus",        limit: 9,               null: false
     t.integer  "Enabled",          limit: 1,   default: 1
     t.integer  "Recycle",          limit: 1,   default: 0
     t.integer  "ActionOnPurge",    limit: 1,   default: 0
     t.integer  "VolRetention",     limit: 8,   default: 0
     t.integer  "VolUseDuration",   limit: 8,   default: 0
     t.integer  "MaxVolJobs",                   default: 0
     t.integer  "MaxVolFiles",                  default: 0
     t.integer  "MaxVolBytes",      limit: 8,   default: 0
     t.integer  "InChanger",        limit: 1,   default: 0
     t.integer  "StorageId",                    default: 0
     t.integer  "DeviceId",                     default: 0
     t.integer  "MediaAddressing",  limit: 1,   default: 0
     t.integer  "VolReadTime",      limit: 8,   default: 0
     t.integer  "VolWriteTime",     limit: 8,   default: 0
     t.integer  "EndFile",                      default: 0
     t.integer  "EndBlock",                     default: 0
     t.integer  "LocationId",                   default: 0
     t.integer  "RecycleCount",                 default: 0
     t.datetime "InitialWrite"
     t.integer  "ScratchPoolId",                default: 0
     t.integer  "RecyclePoolId",                default: 0
     t.binary   "Comment"
   end
 
   add_index "Media", ["PoolId"], name: "PoolId", using: :btree
   add_index "Media", ["VolumeName"], name: "VolumeName", unique: true, length: {"VolumeName"=>128}, using: :btree
 
   create_table "MediaType", primary_key: "MediaTypeId", force: true do |t|
     t.binary  "MediaType", limit: 255,             null: false
     t.integer "ReadOnly",  limit: 1,   default: 0
   end
 
   create_table "Path", primary_key: "PathId", force: true do |t|
     t.binary "Path", null: false
   end
 
   add_index "Path", ["Path"], name: "Path", length: {"Path"=>255}, using: :btree
 
   create_table "PathHierarchy", primary_key: "PathId", force: true do |t|
     t.integer "PPathId", null: false
   end
 
   add_index "PathHierarchy", ["PPathId"], name: "pathhierarchy_ppathid", using: :btree
 
   create_table "PathVisibility", id: false, force: true do |t|
     t.integer "PathId",                       null: false
     t.integer "JobId",                        null: false
     t.integer "Size",   limit: 8, default: 0
     t.integer "Files",            default: 0
   end
 
   add_index "PathVisibility", ["JobId"], name: "pathvisibility_jobid", using: :btree
 
   create_table "Pool", primary_key: "PoolId", force: true do |t|
     t.binary  "Name",               limit: 255,             null: false
     t.integer "NumVols",                        default: 0
     t.integer "MaxVols",                        default: 0
     t.integer "UseOnce",            limit: 1,   default: 0
     t.integer "UseCatalog",         limit: 1,   default: 0
     t.integer "AcceptAnyVolume",    limit: 1,   default: 0
     t.integer "VolRetention",       limit: 8,   default: 0
     t.integer "VolUseDuration",     limit: 8,   default: 0
     t.integer "MaxVolJobs",                     default: 0
     t.integer "MaxVolFiles",                    default: 0
     t.integer "MaxVolBytes",        limit: 8,   default: 0
     t.integer "AutoPrune",          limit: 1,   default: 0
     t.integer "Recycle",            limit: 1,   default: 0
     t.integer "ActionOnPurge",      limit: 1,   default: 0
     t.string  "PoolType",           limit: 9,               null: false
     t.integer "LabelType",          limit: 1,   default: 0
     t.binary  "LabelFormat",        limit: 255
     t.integer "Enabled",            limit: 1,   default: 1
     t.integer "ScratchPoolId",                  default: 0
     t.integer "RecyclePoolId",                  default: 0
     t.integer "NextPoolId",                     default: 0
     t.integer "MigrationHighBytes", limit: 8,   default: 0
     t.integer "MigrationLowBytes",  limit: 8,   default: 0
     t.integer "MigrationTime",      limit: 8,   default: 0
   end
 
   add_index "Pool", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
 
   create_table "RestoreObject", primary_key: "RestoreObjectId", force: true do |t|
     t.binary  "ObjectName",                                       null: false
     t.binary  "RestoreObject",     limit: 2147483647,             null: false
     t.binary  "PluginName",        limit: 255,                    null: false
     t.integer "ObjectLength",                         default: 0
     t.integer "ObjectFullLength",                     default: 0
     t.integer "ObjectIndex",                          default: 0
     t.integer "ObjectType",                           default: 0
     t.integer "FileIndex",                            default: 0
     t.integer "JobId",                                            null: false
     t.integer "ObjectCompression",                    default: 0
   end
 
   add_index "RestoreObject", ["JobId"], name: "JobId", using: :btree
 
   create_table "Status", primary_key: "JobStatus", force: true do |t|
     t.binary  "JobStatusLong"
     t.integer "Severity"
   end
 
   create_table "Storage", primary_key: "StorageId", force: true do |t|
     t.binary  "Name",        limit: 255,             null: false
     t.integer "AutoChanger", limit: 1,   default: 0
   end
 
   create_table "UnsavedFiles", primary_key: "UnsavedId", force: true do |t|
     t.integer "JobId",      null: false
     t.integer "PathId",     null: false
     t.integer "FilenameId", null: false
   end
 
   create_table "Version", id: false, force: true do |t|
     t.integer "VersionId", null: false
   end
 
   create_table "filesets", force: true do |t|
     t.string   "name"
     t.integer  "host_id"
     t.text     "exclude_directions"
     t.text     "include_directions"
     t.datetime "created_at"
     t.datetime "updated_at"
   end
 
   add_index "filesets", ["host_id"], name: "index_filesets_on_host_id", using: :btree
 
   create_table "hosts", force: true do |t|
     t.binary   "name",           limit: 255,                 null: false
     t.binary   "fqdn",           limit: 255,                 null: false
     t.integer  "port",                                       null: false
     t.integer  "file_retention",                             null: false
     t.integer  "job_retention",                              null: false
     t.datetime "created_at"
     t.datetime "updated_at"
     t.string   "password"
     t.boolean  "baculized",                  default: false, null: false
     t.datetime "baculized_at"
     t.integer  "status",         limit: 1,   default: 0
     t.integer  "client_id"
     t.boolean  "verified",                   default: false
   end
 
   add_index "hosts", ["name"], name: "index_hosts_on_name", unique: true, length: {"name"=>128}, using: :btree
 
   create_table "job_templates", force: true do |t|
     t.string   "name",                                       null: false
     t.integer  "job_type",         limit: 1
     t.integer  "host_id"
     t.integer  "fileset_id"
     t.integer  "schedule_id"
     t.datetime "created_at"
     t.datetime "updated_at"
     t.boolean  "enabled",                    default: false
     t.binary   "restore_location"
   end
 
+  create_table "ownerships", force: true do |t|
+    t.integer  "user_id"
+    t.integer  "host_id"
+    t.datetime "created_at"
+    t.datetime "updated_at"
+  end
+
   create_table "schedules", force: true do |t|
     t.string  "name"
     t.string  "runs"
     t.integer "host_id"
   end
 
   add_index "schedules", ["host_id"], name: "index_schedules_on_host_id", using: :btree
 
   create_table "users", force: true do |t|
     t.string   "username",                             null: false
     t.string   "email"
     t.integer  "user_type",  limit: 1,                 null: false
     t.boolean  "enabled",              default: false
     t.datetime "created_at"
     t.datetime "updated_at"
   end
 
 end