diff --git a/app/controllers/hosts_controller.rb b/app/controllers/hosts_controller.rb
index ac74a94..eb05f8d 100644
--- a/app/controllers/hosts_controller.rb
+++ b/app/controllers/hosts_controller.rb
@@ -1,171 +1,176 @@
 class HostsController < ApplicationController
   before_action :require_logged_in
   before_action :fetch_host, only: [:show, :edit, :update, :destroy, :submit_config,
                                     :revoke, :disable]
   before_action :fetch_hosts_of_user, only: [:new, :edit, :create]
 
   # GET /hosts/new
   def new
     @host = Host.new
     @host.port = 9102
   end
 
   # POST /hosts
   def create
     @host = Host.new(fetch_params)
 
     set_host_type
 
     @host.verified = !@host.institutional?
 
     if user_can_add_this_host? && @host.save
       flash[:success] = 'Host created successfully'
       current_user.hosts << @host
       UserMailer.notify_admin(current_user, @host.fqdn).deliver
       redirect_to host_path @host
     else
       flash[:error] = 'Host was not created'
       render :new
     end
   end
 
   # GET /hosts/1
   def show
     @schedules = @host.job_templates.map(&:schedule)
     @filesets = @host.job_templates.map(&:fileset)
   end
 
   # GET /hosts/1/edit
   def edit; end
 
   # PATCH /hosts/1
   def update
     updates = fetch_params.slice(:port, :password)
     if updates.present? && @host.update_attributes(updates)
       @host.recalculate if @host.bacula_ready?
      flash[:success] = 'Host updated successfully. You must update your file daemon accordingly.'
       redirect_to host_path @host
     else
       render :edit
     end
   end
 
   # DELETE /hosts/1
   def destroy
     if @host.destroy
       flash[:success] = 'Host destroyed successfully'
     else
       flash[:error] = 'Host not destroyed'
     end
 
     redirect_to root_path
   end
 
   # POST /hosts/1/disable
   def disable
     if @host.disable_jobs_and_update
       flash[:success] = 'Client disabled'
     else
       flash[:error] = 'Something went wrong, try again later'
     end
 
     redirect_to host_path(@host)
   end
 
   # POST /hosts/1/submit_config
   def submit_config
     if @host.dispatch_to_bacula
       flash[:success] = 'Host configuration sent to Bacula successfully'
     else
       flash[:error] = 'Something went wrong, try again later'
     end
 
     redirect_to host_path(@host)
   end
 
   # DELETE /hosts/1/revoke
   def revoke
     if @host.remove_from_bacula
       flash[:success] = 'Host configuration removed from Bacula successfully'
     else
       flash[:error] = 'Something went wrong, try again later'
     end
 
     redirect_to root_path
   end
 
   # GET /hosts/fetch_vima_hosts
   def fetch_vima_hosts
     if params[:code].blank?
       return redirect_to client.auth_code.authorize_url(:redirect_uri => redirect_uri,
                                                         scope: 'read')
     end
 
     access_token = client.auth_code.get_token(
       params['code'],
       { :redirect_uri => redirect_uri },
       { :mode => :query, :param_name => "access_token", :header_format => "" }
     )
 
     vms = access_token.get(
       'https://vima.grnet.gr/instances/list?tag=vima:service:archiving',
       { mode: :query, param_name: 'access_token' }
     ).parsed.deep_symbolize_keys[:response][:instances]
 
     session[:vms] = vms.first(50)
+
+    current_user.temp_hosts = vms
+    current_user.hosts_updated_at = Time.now
+    current_user.save
+
     Host.where(fqdn: vms).each do |host|
       host.users << current_user unless host.users.include?(current_user)
     end
 
     redirect_to new_host_path
   end
 
   private
 
   def client
     OAuth2::Client.new(
       Rails.application.secrets.oauth2_vima_client_id,
       Rails.application.secrets.oauth2_vima_secret,
       site: 'https://vima.grnet.gr',
       token_url: "/o/token",
       authorize_url: "/o/authorize",
       :ssl => {:ca_path => "/etc/ssl/certs"}
     )
   end
 
   def redirect_uri
     uri = URI.parse(request.url)
     uri.scheme = 'https' unless Rails.env.development?
     uri.path = '/hosts/fetch_vima_hosts'
     uri.query = nil
     uri.to_s
   end
 
   def fetch_hosts_of_user
     return if not current_user.needs_host_list?
 
     @hosts_of_user = session[:vms] - current_user.hosts.pluck(:fqdn)
   end
 
   def fetch_host
     @host = current_user.hosts.includes(job_templates: [:fileset, :schedule]).find(params[:id])
   end
 
   def fetch_params
     params.require(:host).permit(:fqdn, :port, :password)
   end
 
   def user_can_add_this_host?
     !current_user.needs_host_list? || @hosts_of_user.include?(@host.fqdn)
   end
 
   def set_host_type
     @host.origin = if current_user.vima?
                      :vima
                    elsif current_user.okeanos?
                      :okeanos
                    else
                      :institutional
                    end
   end
 end
diff --git a/app/models/user.rb b/app/models/user.rb
index 4a658c1..8684a93 100644
--- a/app/models/user.rb
+++ b/app/models/user.rb
@@ -1,120 +1,122 @@
 class User < ActiveRecord::Base
 
   attr_accessor :password, :retype_password
 
+  serialize :temp_hosts, JSON
+
   has_many :ownerships
   has_many :hosts, through: :ownerships, inverse_of: :users
   has_many :invitations
 
   enum user_type: { institutional: 0, vima: 1, okeanos: 2, admin: 3 }
 
   validates :user_type, presence: true
   validates :username, presence: true, uniqueness: { scope: :user_type }
   validates :email, presence: true, uniqueness: { scope: :user_type }
 
   before_create :confirm_passwords, if: :admin?
 
   # Returns an admin user with the given password
   #
   # @param username[String] username from user input
   # @param a_password[String] password from user input
   #
   # @return [User] the admin user or nil
   def self.fetch_admin_with_password(username, a_password)
     hashed_pass = Digest::SHA256.hexdigest(a_password + Rails.application.secrets.salt)
     admin = User.admin.find_by_username_and_password_hash(username, hashed_pass)
     admin
   end
 
   # Composes the user's display name from the user's username and email
   #
   # @return [String]
   def display_name
     "#{username} <#{email}>"
   end
 
   # Determines if the user must select hosts from a list or enter their
   # FQDN manually
   #
   # @return [Boolean]
   def needs_host_list?
     vima? || okeanos?
   end
 
   # Determines if the user is editable or not.
   # Editable users are only admin users, all others come from 3rd party authorization
   #
   # @return [Boolean]
   def editable?
     admin?
   end
 
   # Marks a user as not enabled
   def ban
     self.enabled = false
     save
   end
 
   # Marks a user as enabled
   def unban
     self.enabled = true
     save
   end
 
   # Stores a hashed password as a password_hash
   #
   # @param a_password[String] the user submitted password
   #
   # @return [Boolean] the save exit status
   def add_password(a_password)
     self.password_hash = Digest::SHA256.hexdigest(a_password + Rails.application.secrets.salt)
     self.save
   end
 
   # Fetches the user's unverified hosts
   #
   # @return [Array] of Strings containing the hosts' names
   def unverified_hosts
     hosts.unverified.pluck(:name)
   end
 
   # Fetches the user's hosts that are being backed up by bacula
   #
    # @return [Array] of Strings containing the hosts' names
   def baculized_hosts
     hosts.in_bacula.pluck(:name)
   end
 
   # Fetches the user's hosts that are NOT being backed up by bacula
   #
    # @return [Array] of Strings containing the hosts' names
   def non_baculized_hosts
     hosts.not_baculized.pluck(:name)
   end
 
   # Determines if a vima user needs to update his hosts' list
   #
   # @return [Boolean]
   def refetch_hosts?
     return false unless vima?
     return true if hosts_updated_at.nil?
 
     hosts_updated_at < Archiving.settings[:skip_host_fetch_time_period].ago
   end
 
   private
 
   def confirm_passwords
     if password.blank?
       self.errors.add(:password, 'Must give a password')
       return false
     end
     if password != retype_password
       self.errors.add(:password, 'Passwords mismatch')
       self.errors.add(:retype_password, 'Passwords mismatch')
       return false
     end
 
     true
   end
 end
diff --git a/db/migrate/20160131105935_add_temp_hosts_to_user.rb b/db/migrate/20160131105935_add_temp_hosts_to_user.rb
new file mode 100644
index 0000000..f4015dc
--- /dev/null
+++ b/db/migrate/20160131105935_add_temp_hosts_to_user.rb
@@ -0,0 +1,9 @@
+class AddTempHostsToUser < ActiveRecord::Migration
+  def up
+    add_column :users, :temp_hosts, :string, default: [].to_json
+  end
+
+  def down
+    remove_column :users, :temp_hosts
+  end
+end
diff --git a/db/schema.rb b/db/schema.rb
index 7a28564..865db40 100644
--- a/db/schema.rb
+++ b/db/schema.rb
@@ -1,439 +1,440 @@
 # encoding: UTF-8
 # This file is auto-generated from the current state of the database. Instead
 # of editing this file, please use the migrations feature of Active Record to
 # incrementally modify your database, and then regenerate this schema definition.
 #
 # Note that this schema.rb definition is the authoritative source for your
 # database schema. If you need to create the application database on another
 # system, you should be using db:schema:load, not running all the migrations
 # from scratch. The latter is a flawed and unsustainable approach (the more migrations
 # you'll amass, the slower it'll run and the greater likelihood for issues).
 #
 # It's strongly recommended that you check this file into your version control system.
 
-ActiveRecord::Schema.define(version: 20160125170032) do
+ActiveRecord::Schema.define(version: 20160131105935) do
 
   create_table "BaseFiles", primary_key: "BaseId", force: true do |t|
     t.integer "BaseJobId",           null: false
     t.integer "JobId",               null: false
     t.integer "FileId",    limit: 8, null: false
     t.integer "FileIndex"
   end
 
   add_index "BaseFiles", ["JobId"], name: "basefiles_jobid_idx", using: :btree
 
   create_table "CDImages", primary_key: "MediaId", force: true do |t|
     t.datetime "LastBurn", null: false
   end
 
   create_table "Client", primary_key: "ClientId", force: true do |t|
     t.binary  "Name",          limit: 255,             null: false
     t.binary  "Uname",         limit: 255,             null: false
     t.integer "AutoPrune",     limit: 1,   default: 0
     t.integer "FileRetention", limit: 8,   default: 0
     t.integer "JobRetention",  limit: 8,   default: 0
   end
 
   add_index "Client", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
 
   create_table "Counters", id: false, force: true do |t|
     t.binary  "Counter",      limit: 255,             null: false
     t.integer "MinValue",                 default: 0
     t.integer "MaxValue",                 default: 0
     t.integer "CurrentValue",             default: 0
     t.binary  "WrapCounter",  limit: 255,             null: false
   end
 
   create_table "Device", primary_key: "DeviceId", force: true do |t|
     t.binary   "Name",                       limit: 255,             null: false
     t.integer  "MediaTypeId",                            default: 0
     t.integer  "StorageId",                              default: 0
     t.integer  "DevMounts",                              default: 0
     t.integer  "DevReadBytes",               limit: 8,   default: 0
     t.integer  "DevWriteBytes",              limit: 8,   default: 0
     t.integer  "DevReadBytesSinceCleaning",  limit: 8,   default: 0
     t.integer  "DevWriteBytesSinceCleaning", limit: 8,   default: 0
     t.integer  "DevReadTime",                limit: 8,   default: 0
     t.integer  "DevWriteTime",               limit: 8,   default: 0
     t.integer  "DevReadTimeSinceCleaning",   limit: 8,   default: 0
     t.integer  "DevWriteTimeSinceCleaning",  limit: 8,   default: 0
     t.datetime "CleaningDate"
     t.integer  "CleaningPeriod",             limit: 8,   default: 0
   end
 
   create_table "File", primary_key: "FileId", force: true do |t|
     t.integer "FileIndex",              default: 0
     t.integer "JobId",                              null: false
     t.integer "PathId",                             null: false
     t.integer "FilenameId",                         null: false
     t.integer "DeltaSeq",   limit: 2,   default: 0
     t.integer "MarkId",                 default: 0
     t.binary  "LStat",      limit: 255,             null: false
     t.binary  "MD5",        limit: 255
   end
 
   add_index "File", ["JobId", "PathId", "FilenameId"], name: "JobId_2", using: :btree
   add_index "File", ["JobId"], name: "JobId", using: :btree
 
   create_table "FileSet", primary_key: "FileSetId", force: true do |t|
     t.binary   "FileSet",    limit: 255, null: false
     t.binary   "MD5",        limit: 255
     t.datetime "CreateTime"
   end
 
   create_table "Filename", primary_key: "FilenameId", force: true do |t|
     t.binary "Name", null: false
   end
 
   add_index "Filename", ["Name"], name: "Name", length: {"Name"=>255}, using: :btree
 
   create_table "Job", primary_key: "JobId", force: true do |t|
     t.binary   "Job",             limit: 255,             null: false
     t.binary   "Name",            limit: 255,             null: false
     t.binary   "Type",            limit: 1,               null: false
     t.binary   "Level",           limit: 1,               null: false
     t.integer  "ClientId",                    default: 0
     t.binary   "JobStatus",       limit: 1,               null: false
     t.datetime "SchedTime"
     t.datetime "StartTime"
     t.datetime "EndTime"
     t.datetime "RealEndTime"
     t.integer  "JobTDate",        limit: 8,   default: 0
     t.integer  "VolSessionId",                default: 0
     t.integer  "VolSessionTime",              default: 0
     t.integer  "JobFiles",                    default: 0
     t.integer  "JobBytes",        limit: 8,   default: 0
     t.integer  "ReadBytes",       limit: 8,   default: 0
     t.integer  "JobErrors",                   default: 0
     t.integer  "JobMissingFiles",             default: 0
     t.integer  "PoolId",                      default: 0
     t.integer  "FileSetId",                   default: 0
     t.integer  "PriorJobId",                  default: 0
     t.integer  "PurgedFiles",     limit: 1,   default: 0
     t.integer  "HasBase",         limit: 1,   default: 0
     t.integer  "HasCache",        limit: 1,   default: 0
     t.integer  "Reviewed",        limit: 1,   default: 0
     t.binary   "Comment"
   end
 
   add_index "Job", ["Name"], name: "Name", length: {"Name"=>128}, using: :btree
 
   create_table "JobHisto", id: false, force: true do |t|
     t.integer  "JobId",                                   null: false
     t.binary   "Job",             limit: 255,             null: false
     t.binary   "Name",            limit: 255,             null: false
     t.binary   "Type",            limit: 1,               null: false
     t.binary   "Level",           limit: 1,               null: false
     t.integer  "ClientId",                    default: 0
     t.binary   "JobStatus",       limit: 1,               null: false
     t.datetime "SchedTime"
     t.datetime "StartTime"
     t.datetime "EndTime"
     t.datetime "RealEndTime"
     t.integer  "JobTDate",        limit: 8,   default: 0
     t.integer  "VolSessionId",                default: 0
     t.integer  "VolSessionTime",              default: 0
     t.integer  "JobFiles",                    default: 0
     t.integer  "JobBytes",        limit: 8,   default: 0
     t.integer  "ReadBytes",       limit: 8,   default: 0
     t.integer  "JobErrors",                   default: 0
     t.integer  "JobMissingFiles",             default: 0
     t.integer  "PoolId",                      default: 0
     t.integer  "FileSetId",                   default: 0
     t.integer  "PriorJobId",                  default: 0
     t.integer  "PurgedFiles",     limit: 1,   default: 0
     t.integer  "HasBase",         limit: 1,   default: 0
     t.integer  "HasCache",        limit: 1,   default: 0
     t.integer  "Reviewed",        limit: 1,   default: 0
     t.binary   "Comment"
   end
 
   add_index "JobHisto", ["JobId"], name: "JobId", using: :btree
   add_index "JobHisto", ["StartTime"], name: "StartTime", using: :btree
 
   create_table "JobMedia", primary_key: "JobMediaId", force: true do |t|
     t.integer "JobId",                  null: false
     t.integer "MediaId",                null: false
     t.integer "FirstIndex", default: 0
     t.integer "LastIndex",  default: 0
     t.integer "StartFile",  default: 0
     t.integer "EndFile",    default: 0
     t.integer "StartBlock", default: 0
     t.integer "EndBlock",   default: 0
     t.integer "VolIndex",   default: 0
   end
 
   add_index "JobMedia", ["JobId", "MediaId"], name: "JobId", using: :btree
 
   create_table "Location", primary_key: "LocationId", force: true do |t|
     t.binary  "Location", limit: 255,             null: false
     t.integer "Cost",                 default: 0
     t.integer "Enabled",  limit: 1
   end
 
   create_table "LocationLog", primary_key: "LocLogId", force: true do |t|
     t.datetime "Date"
     t.binary   "Comment",                            null: false
     t.integer  "MediaId",                default: 0
     t.integer  "LocationId",             default: 0
     t.string   "NewVolStatus", limit: 9,             null: false
     t.integer  "NewEnabled",   limit: 1
   end
 
   create_table "Log", primary_key: "LogId", force: true do |t|
     t.integer  "JobId",   default: 0
     t.datetime "Time"
     t.binary   "LogText",             null: false
   end
 
   add_index "Log", ["JobId"], name: "JobId", using: :btree
 
   create_table "Media", primary_key: "MediaId", force: true do |t|
     t.binary   "VolumeName",       limit: 255,             null: false
     t.integer  "Slot",                         default: 0
     t.integer  "PoolId",                       default: 0
     t.binary   "MediaType",        limit: 255,             null: false
     t.integer  "MediaTypeId",                  default: 0
     t.integer  "LabelType",        limit: 1,   default: 0
     t.datetime "FirstWritten"
     t.datetime "LastWritten"
     t.datetime "LabelDate"
     t.integer  "VolJobs",                      default: 0
     t.integer  "VolFiles",                     default: 0
     t.integer  "VolBlocks",                    default: 0
     t.integer  "VolMounts",                    default: 0
     t.integer  "VolBytes",         limit: 8,   default: 0
     t.integer  "VolParts",                     default: 0
     t.integer  "VolErrors",                    default: 0
     t.integer  "VolWrites",                    default: 0
     t.integer  "VolCapacityBytes", limit: 8,   default: 0
     t.string   "VolStatus",        limit: 9,               null: false
     t.integer  "Enabled",          limit: 1,   default: 1
     t.integer  "Recycle",          limit: 1,   default: 0
     t.integer  "ActionOnPurge",    limit: 1,   default: 0
     t.integer  "VolRetention",     limit: 8,   default: 0
     t.integer  "VolUseDuration",   limit: 8,   default: 0
     t.integer  "MaxVolJobs",                   default: 0
     t.integer  "MaxVolFiles",                  default: 0
     t.integer  "MaxVolBytes",      limit: 8,   default: 0
     t.integer  "InChanger",        limit: 1,   default: 0
     t.integer  "StorageId",                    default: 0
     t.integer  "DeviceId",                     default: 0
     t.integer  "MediaAddressing",  limit: 1,   default: 0
     t.integer  "VolReadTime",      limit: 8,   default: 0
     t.integer  "VolWriteTime",     limit: 8,   default: 0
     t.integer  "EndFile",                      default: 0
     t.integer  "EndBlock",                     default: 0
     t.integer  "LocationId",                   default: 0
     t.integer  "RecycleCount",                 default: 0
     t.datetime "InitialWrite"
     t.integer  "ScratchPoolId",                default: 0
     t.integer  "RecyclePoolId",                default: 0
     t.binary   "Comment"
   end
 
   add_index "Media", ["PoolId"], name: "PoolId", using: :btree
   add_index "Media", ["VolumeName"], name: "VolumeName", unique: true, length: {"VolumeName"=>128}, using: :btree
 
   create_table "MediaType", primary_key: "MediaTypeId", force: true do |t|
     t.binary  "MediaType", limit: 255,             null: false
     t.integer "ReadOnly",  limit: 1,   default: 0
   end
 
   create_table "Path", primary_key: "PathId", force: true do |t|
     t.binary "Path", null: false
   end
 
   add_index "Path", ["Path"], name: "Path", length: {"Path"=>255}, using: :btree
 
   create_table "PathHierarchy", primary_key: "PathId", force: true do |t|
     t.integer "PPathId", null: false
   end
 
   add_index "PathHierarchy", ["PPathId"], name: "pathhierarchy_ppathid", using: :btree
 
   create_table "PathVisibility", id: false, force: true do |t|
     t.integer "PathId",                       null: false
     t.integer "JobId",                        null: false
     t.integer "Size",   limit: 8, default: 0
     t.integer "Files",            default: 0
   end
 
   add_index "PathVisibility", ["JobId"], name: "pathvisibility_jobid", using: :btree
 
   create_table "Pool", primary_key: "PoolId", force: true do |t|
     t.binary  "Name",               limit: 255,             null: false
     t.integer "NumVols",                        default: 0
     t.integer "MaxVols",                        default: 0
     t.integer "UseOnce",            limit: 1,   default: 0
     t.integer "UseCatalog",         limit: 1,   default: 0
     t.integer "AcceptAnyVolume",    limit: 1,   default: 0
     t.integer "VolRetention",       limit: 8,   default: 0
     t.integer "VolUseDuration",     limit: 8,   default: 0
     t.integer "MaxVolJobs",                     default: 0
     t.integer "MaxVolFiles",                    default: 0
     t.integer "MaxVolBytes",        limit: 8,   default: 0
     t.integer "AutoPrune",          limit: 1,   default: 0
     t.integer "Recycle",            limit: 1,   default: 0
     t.integer "ActionOnPurge",      limit: 1,   default: 0
     t.string  "PoolType",           limit: 9,               null: false
     t.integer "LabelType",          limit: 1,   default: 0
     t.binary  "LabelFormat",        limit: 255
     t.integer "Enabled",            limit: 1,   default: 1
     t.integer "ScratchPoolId",                  default: 0
     t.integer "RecyclePoolId",                  default: 0
     t.integer "NextPoolId",                     default: 0
     t.integer "MigrationHighBytes", limit: 8,   default: 0
     t.integer "MigrationLowBytes",  limit: 8,   default: 0
     t.integer "MigrationTime",      limit: 8,   default: 0
   end
 
   add_index "Pool", ["Name"], name: "Name", unique: true, length: {"Name"=>128}, using: :btree
 
   create_table "RestoreObject", primary_key: "RestoreObjectId", force: true do |t|
     t.binary  "ObjectName",                                       null: false
     t.binary  "RestoreObject",     limit: 2147483647,             null: false
     t.binary  "PluginName",        limit: 255,                    null: false
     t.integer "ObjectLength",                         default: 0
     t.integer "ObjectFullLength",                     default: 0
     t.integer "ObjectIndex",                          default: 0
     t.integer "ObjectType",                           default: 0
     t.integer "FileIndex",                            default: 0
     t.integer "JobId",                                            null: false
     t.integer "ObjectCompression",                    default: 0
   end
 
   add_index "RestoreObject", ["JobId"], name: "JobId", using: :btree
 
   create_table "Status", primary_key: "JobStatus", force: true do |t|
     t.binary  "JobStatusLong"
     t.integer "Severity"
   end
 
   create_table "Storage", primary_key: "StorageId", force: true do |t|
     t.binary  "Name",        limit: 255,             null: false
     t.integer "AutoChanger", limit: 1,   default: 0
   end
 
   create_table "UnsavedFiles", primary_key: "UnsavedId", force: true do |t|
     t.integer "JobId",      null: false
     t.integer "PathId",     null: false
     t.integer "FilenameId", null: false
   end
 
   create_table "Version", id: false, force: true do |t|
     t.integer "VersionId", null: false
   end
 
   create_table "configuration_settings", force: true do |t|
     t.string   "job",        default: "{}"
     t.string   "client",     default: "{}"
     t.datetime "created_at"
     t.datetime "updated_at"
     t.string   "pool",       default: "{}"
   end
 
   create_table "filesets", force: true do |t|
     t.string   "name"
     t.integer  "host_id"
     t.text     "exclude_directions"
     t.text     "include_directions"
     t.datetime "created_at"
     t.datetime "updated_at"
   end
 
   add_index "filesets", ["host_id"], name: "index_filesets_on_host_id", using: :btree
 
   create_table "hosts", force: true do |t|
     t.binary   "name",                       limit: 255,                 null: false
     t.binary   "fqdn",                       limit: 255,                 null: false
     t.integer  "port",                                                   null: false
     t.integer  "file_retention",                                         null: false
     t.integer  "job_retention",                                          null: false
     t.datetime "created_at"
     t.datetime "updated_at"
     t.string   "password"
     t.boolean  "baculized",                              default: false, null: false
     t.datetime "baculized_at"
     t.integer  "status",                     limit: 1,   default: 0
     t.integer  "client_id"
     t.boolean  "verified",                               default: false
     t.datetime "verified_at"
     t.integer  "verifier_id"
     t.string   "job_retention_period_type"
     t.string   "file_retention_period_type"
     t.integer  "origin",                     limit: 1
   end
 
   add_index "hosts", ["name"], name: "index_hosts_on_name", unique: true, length: {"name"=>128}, using: :btree
 
   create_table "invitations", force: true do |t|
     t.integer  "user_id"
     t.integer  "host_id"
     t.string   "verification_code"
     t.datetime "created_at"
     t.datetime "updated_at"
   end
 
   add_index "invitations", ["user_id", "verification_code"], name: "index_invitations_on_user_id_and_verification_code", using: :btree
 
   create_table "job_templates", force: true do |t|
     t.string   "name",                                             null: false
     t.integer  "job_type",               limit: 1
     t.integer  "host_id"
     t.integer  "fileset_id"
     t.integer  "schedule_id"
     t.datetime "created_at"
     t.datetime "updated_at"
     t.boolean  "enabled",                          default: false
     t.binary   "restore_location"
     t.boolean  "baculized",                        default: false
     t.datetime "baculized_at"
     t.string   "client_before_run_file"
     t.string   "client_after_run_file"
   end
 
   create_table "ownerships", force: true do |t|
     t.integer  "user_id"
     t.integer  "host_id"
     t.datetime "created_at"
     t.datetime "updated_at"
   end
 
   create_table "schedule_runs", force: true do |t|
     t.integer  "schedule_id"
     t.integer  "level",       limit: 1
     t.string   "month"
     t.string   "day"
     t.string   "time"
     t.datetime "created_at"
     t.datetime "updated_at"
   end
 
   add_index "schedule_runs", ["schedule_id"], name: "index_schedule_runs_on_schedule_id", using: :btree
 
   create_table "schedules", force: true do |t|
     t.string  "name"
     t.string  "runs"
     t.integer "host_id"
   end
 
   add_index "schedules", ["host_id"], name: "index_schedules_on_host_id", using: :btree
 
   create_table "users", force: true do |t|
     t.string   "username",                                   null: false
     t.string   "email"
     t.integer  "user_type",        limit: 1,                 null: false
     t.boolean  "enabled",                    default: false
     t.datetime "created_at"
     t.datetime "updated_at"
     t.string   "identifier"
     t.string   "password_hash"
     t.datetime "login_at"
     t.datetime "hosts_updated_at"
+    t.string   "temp_hosts",                 default: "[]"
   end
 
   add_index "users", ["identifier"], name: "index_users_on_identifier", using: :btree
   add_index "users", ["password_hash"], name: "index_users_on_password_hash", using: :btree
 
 end
diff --git a/lib/peter/strategies/vima.rb b/lib/peter/strategies/vima.rb
index aee263e..b53d230 100644
--- a/lib/peter/strategies/vima.rb
+++ b/lib/peter/strategies/vima.rb
@@ -1,122 +1,123 @@
 ## -*- encoding : utf-8 -*-
 require 'oauth2'
 
 Warden::Strategies.add(:vima) do
   Key = Rails.application.secrets.oauth2_vima_client_id
   Secret = Rails.application.secrets.oauth2_vima_secret
 
   def valid?
     params['vima'] || params['error'] || params['code']
   end
 
   def client
     OAuth2::Client.new(
       Key,
       Secret,
       site: 'https://vima.grnet.gr',
       token_url: "/o/token",
       authorize_url: "/o/authorize",
       :ssl => {:ca_path => "/etc/ssl/certs"}
     )
   end
 
   def redirect_uri
     uri = URI.parse(request.url)
     uri.scheme = 'https' unless Rails.env.development?
     uri.path = '/vima'
     uri.query = nil
     uri.to_s
   end
 
   def redirect_to_vima
     redirect! client.auth_code.authorize_url(:redirect_uri => redirect_uri, scope: 'read')
   end
 
   def authenticate!
     if !Archiving::settings[:vima_oauth_enabled]
       return fail!("ViMa is temporarily disabled")
     end
 
     if params['error']
       Rails.logger.warn("WARDEN: ERROR #{params['error']}")
       return fail!("ViMa log in failed: #{params['error']}")
     end
 
     return redirect_to_vima if params['vima']
 
     access_token = client.auth_code.get_token(
       params['code'],
       { :redirect_uri => redirect_uri },
       { :mode => :query, :param_name => "access_token", :header_format => "" })
 
     user_data = access_token.get(
       'https://vima.grnet.gr/user/details',
       { mode: :query, param_name: 'access_token' }
     ).parsed.deep_symbolize_keys
 
     if [user_data[:username], user_data[:email], user_data[:id]].any?(&:blank?)
       return fail!("ViMa login failed: no user data")
     end
 
     ###### TBR
     # temporary, for user migration
     user = User.find_or_initialize_by(username: user_data[:username],
                                       email: user_data[:email])
     user.identifier = "vima:#{user_data[:id]}"
     ######
 
     # actual implementation
     #user = User.find_or_initialize_by(identifier: user_data[:identifier])
 
     if !user.enabled? && user.persisted?
       return fail!('Service not available')
     end
 
     user.login_at = Time.now
 
     if user.new_record?
       user.enabled = true
       # TBR
       user.identifier = "vima:#{user_data[:id]}"
       user.vima!
     else
       user.save!
     end
 
     if user.refetch_hosts?
       vms = fetch_vms(access_token)[:response][:instances]
       user.hosts_updated_at = Time.now
+      user.temp_hosts = vms
       user.save
     end
 
-    vms ||= user.hosts.pluck(:fqdn)
+    vms ||= (user.temp_hosts + user.hosts.pluck(:fqdn)).uniq
 
     assign_vms(user, vms)
 
     success!(user)
   end
 
   def fetch_vms(access_token)
     Rails.logger.warn("ViMa: fetching vms")
     vms = access_token.get(
       'https://vima.grnet.gr/instances/list?tag=vima:service:archiving',
       { mode: :query, param_name: 'access_token' }
     ).parsed.deep_symbolize_keys
 
     if vms[:response][:errors] != false
       Rails.logger.warn("ViMa: errors on instances/list response for user #{vms[:user][:username]}")
     end
 
     vms
   end
 
   def assign_vms(user, vms)
     Rails.logger.warn("ViMa: user: #{user.username}")
     Rails.logger.warn("ViMa: vms: #{vms}")
     Rails.logger.warn("ViMa: session vms: #{session[:vms]}")
     session[:vms] = vms.first(50)
     Host.where(fqdn: vms).each do |host|
       host.users << user unless host.users.include?(user)
     end
   end
 end