Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Components
class FetchService
include Gitlab::Utils::StrongMemoize
COMPONENT_PATHS = [
::Gitlab::Ci::Components::InstancePath
].freeze
def initialize(address:, current_user:)
@address = address
@current_user = current_user
end
def execute
unless component_path_class
return ServiceResponse.error(
message: "#{error_prefix} the component path is not supported",
reason: :unsupported_path)
end
component_path = component_path_class.new(address: address)
result = component_path.fetch_content!(current_user: current_user)
if result
ServiceResponse.success(payload: {
content: result.content,
path: result.path,
project: component_path.project,
sha: component_path.sha
})
else
ServiceResponse.error(message: "#{error_prefix} content not found", reason: :content_not_found)
end
rescue Gitlab::Access::AccessDeniedError
ServiceResponse.error(
message: "#{error_prefix} project does not exist or you don't have sufficient permissions",
reason: :not_allowed)
end
private
attr_reader :current_user, :address
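# Returns the first path class that recognizes the address format;
# nil means the address shape is unsupported. Memoized below.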
def component_path_class
COMPONENT_PATHS.find { |klass| klass.match?(address) }
end
strong_memoize_attr :component_path_class
def error_prefix
"component '#{address}' -"
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Components::FetchService, feature_category: :pipeline_composition do
let_it_be(:user) { create(:user) }
let_it_be(:current_user) { user }
let_it_be(:current_host) { Gitlab.config.gitlab.host }
let_it_be(:content) do
<<~COMPONENT
job:
script: echo
COMPONENT
end
let(:service) do
described_class.new(address: address, current_user: current_user)
end
let_it_be(:project) do
project = create(
:project, :custom_repo,
files: {
'template.yml' => content,
'my-component/template.yml' => content,
'my-dir/my-component/template.yml' => content
}
)
project.repository.add_tag(project.creator, 'v0.1', project.repository.commit.sha)
project
end
before do
project.add_developer(user)
end
describe '#execute', :aggregate_failures do
subject(:result) { service.execute }
shared_examples 'an external component' do
shared_examples 'component address' do
context 'when content exists' do
it 'returns the content' do
expect(result).to be_success
expect(result.payload[:content]).to eq(content)
end
end
context 'when content does not exist' do
let(:address) { "#{current_host}/#{component_path}@~version-does-not-exist" }
it 'returns an error' do
expect(result).to be_error
expect(result.reason).to eq(:content_not_found)
end
end
end
context 'when user does not have permissions to read the code' do
let(:version) { 'master' }
let(:current_user) { create(:user) }
it 'returns an error' do
expect(result).to be_error
expect(result.reason).to eq(:not_allowed)
end
end
context 'when version is a branch name' do
it_behaves_like 'component address' do
let(:version) { project.default_branch }
end
end
context 'when version is a tag name' do
it_behaves_like 'component address' do
let(:version) { project.repository.tags.first.name }
end
end
context 'when version is a commit sha' do
it_behaves_like 'component address' do
let(:version) { project.repository.tags.first.id }
end
end
context 'when version is not provided' do
let(:version) { nil }
it 'returns an error' do
expect(result).to be_error
expect(result.reason).to eq(:content_not_found)
end
end
context 'when project does not exist' do
let(:component_path) { 'unknown/component' }
let(:version) { '1.0' }
it 'returns an error' do
expect(result).to be_error
expect(result.reason).to eq(:content_not_found)
end
end
context 'when host is different than the current instance host' do
let(:current_host) { 'another-host.com' }
let(:version) { '1.0' }
it 'returns an error' do
expect(result).to be_error
expect(result.reason).to eq(:unsupported_path)
end
end
end
context 'when address points to an external component' do
let(:address) { "#{current_host}/#{component_path}@#{version}" }
context 'when component path is the full path to a project' do
let(:component_path) { project.full_path }
let(:component_yaml_path) { 'template.yml' }
it_behaves_like 'an external component'
end
context 'when component path points to a directory in a project' do
let(:component_path) { "#{project.full_path}/my-component" }
let(:component_yaml_path) { 'my-component/template.yml' }
it_behaves_like 'an external component'
end
context 'when component path points to a nested directory in a project' do
let(:component_path) { "#{project.full_path}/my-dir/my-component" }
let(:component_yaml_path) { 'my-dir/my-component/template.yml' }
it_behaves_like 'an external component'
end
end
end
def stub_project_blob(ref, path, content)
allow_next_instance_of(Repository) do |instance|
allow(instance).to receive(:blob_data_at).with(ref, path).and_return(content)
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module PrometheusMetrics
class ObserveHistogramsService
class << self
def available_histograms
@available_histograms ||= [
histogram(:pipeline_graph_link_calculation_duration_seconds, 'Total time spent calculating links, in seconds', {}, [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.8, 1, 2]),
histogram(:pipeline_graph_links_total, 'Number of links per graph', {}, [1, 5, 10, 25, 50, 100, 200]),
histogram(:pipeline_graph_links_per_job_ratio, 'Ratio of links to job per graph', {}, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
].to_h
end
private
def histogram(name, *attrs)
[name.to_s, proc { Gitlab::Metrics.histogram(name, *attrs) }]
end
end
def initialize(project, params)
@project = project
@params = params
end
def execute
params
.fetch(:histograms, [])
.each { |data| observe(data) }
ServiceResponse.success(http_status: :created)
end
private
attr_reader :project, :params
def observe(data)
histogram = find_histogram(data[:name])
histogram.observe({}, data[:value].to_f)
end
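# Unknown histogram names raise ActiveRecord::RecordNotFound via the fetch fallback below.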
def find_histogram(name)
self.class.available_histograms
.fetch(name) { raise ActiveRecord::RecordNotFound }
.call
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PrometheusMetrics::ObserveHistogramsService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let(:params) { {} }
subject(:execute) { described_class.new(project, params).execute }
before do
Gitlab::Metrics.reset_registry!
end
context 'with empty data' do
it 'does not raise errors' do
is_expected.to be_success
end
end
context 'observes metrics successfully' do
let(:params) do
{
histograms: [
{ name: 'pipeline_graph_link_calculation_duration_seconds', value: '1' },
{ name: 'pipeline_graph_links_per_job_ratio', value: '0.9' }
]
}
end
it 'increments the metrics' do
execute
expect(histogram_data).to match(a_hash_including({ 0.8 => 0.0, 1 => 1.0, 2 => 1.0 }))
expect(histogram_data(:pipeline_graph_links_per_job_ratio))
.to match(a_hash_including({ 0.8 => 0.0, 0.9 => 1.0, 1 => 1.0 }))
end
it 'returns an empty body and status code' do
is_expected.to be_success
expect(subject.http_status).to eq(:created)
expect(subject.payload).to eq({})
end
end
context 'with unknown histograms' do
let(:params) do
{ histograms: [{ name: 'chunky_bacon', value: '4' }] }
end
it 'raises ActiveRecord::RecordNotFound error' do
expect { subject }.to raise_error ActiveRecord::RecordNotFound
end
end
def histogram_data(name = :pipeline_graph_link_calculation_duration_seconds)
Gitlab::Metrics.registry.get(name)&.get({})
end
end
```
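For orientation, here is a minimal invocation sketch of the service above. The histogram name comes from `available_histograms`; the surrounding setup (an existing `project` record) is an assumption, not part of the source:

```ruby
# Hypothetical usage sketch; `project` is assumed to be an existing Project record.
params = {
  histograms: [
    # Values typically arrive as strings (e.g. from request params); the service coerces with to_f.
    { name: 'pipeline_graph_links_total', value: '42' }
  ]
}
response = Ci::PrometheusMetrics::ObserveHistogramsService.new(project, params).execute
response.success?    # => true
response.http_status # => :created
```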
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Refs
class EnqueuePipelinesToUnlockService
include BaseServiceUtility
BATCH_SIZE = 50
ENQUEUE_INTERVAL_SECONDS = 0.1
EXCLUDED_IDS_LIMIT = 1000
def execute(ci_ref, before_pipeline: nil)
total_new_entries = 0
pipelines_scope(ci_ref, before_pipeline).each_batch(of: BATCH_SIZE) do |batch|
pipeline_ids = batch.pluck(:id) # rubocop: disable CodeReuse/ActiveRecord
total_added = Ci::UnlockPipelineRequest.enqueue(pipeline_ids)
total_new_entries += total_added
# Take a little rest to avoid overloading Redis
sleep ENQUEUE_INTERVAL_SECONDS
end
success(
total_pending_entries: Ci::UnlockPipelineRequest.total_pending,
total_new_entries: total_new_entries
)
end
private
def pipelines_scope(ci_ref, before_pipeline)
scope = ci_ref.pipelines.artifacts_locked
if before_pipeline
# We use `same_family_pipeline_ids.map(&:id)` to force run the query and
# specifically pass the array of IDs to the NOT IN condition. If not, we would
# end up running the subquery for same_family_pipeline_ids on each batch instead.
excluded_ids = before_pipeline.same_family_pipeline_ids.map(&:id)
scope = scope.created_before_id(before_pipeline.id)
# When unlocking previous pipelines, we still want to keep the
# last successful CI source pipeline locked.
# If before_pipeline is not provided, like in the case of deleting a ref,
# we want to unlock all pipelines instead.
ci_ref.last_successful_ci_source_pipeline.try do |pipeline|
excluded_ids.concat(pipeline.same_family_pipeline_ids.map(&:id))
end
# We add a limit to the excluded IDs just to be safe and avoid any
# arity issues with the NOT IN query.
scope = scope.where.not(id: excluded_ids.take(EXCLUDED_IDS_LIMIT)) # rubocop: disable CodeReuse/ActiveRecord
end
scope
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Refs::EnqueuePipelinesToUnlockService, :unlock_pipelines, :clean_gitlab_redis_shared_state, feature_category: :build_artifacts do
describe '#execute' do
let_it_be(:ref) { 'master' }
let_it_be(:project) { create(:project) }
let_it_be(:tag_ref_path) { "#{::Gitlab::Git::TAG_REF_PREFIX}#{ref}" }
let_it_be(:ci_ref_tag) { create(:ci_ref, ref_path: tag_ref_path, project: project) }
let_it_be(:branch_ref_path) { "#{::Gitlab::Git::BRANCH_REF_PREFIX}#{ref}" }
let_it_be(:ci_ref_branch) { create(:ci_ref, ref_path: branch_ref_path, project: project) }
let_it_be(:other_ref) { 'other_ref' }
let_it_be(:other_ref_path) { "#{::Gitlab::Git::BRANCH_REF_PREFIX}#{other_ref}" }
let_it_be(:other_ci_ref) { create(:ci_ref, ref_path: other_ref_path, project: project) }
let(:service) { described_class.new }
subject(:execute) { service.execute(target_ref, before_pipeline: before_pipeline) }
before do
stub_const("#{described_class}::BATCH_SIZE", 2)
stub_const("#{described_class}::ENQUEUE_INTERVAL_SECONDS", 0)
end
shared_examples_for 'unlocking pipelines' do
let(:is_tag) { target_ref.ref_path.include?(::Gitlab::Git::TAG_REF_PREFIX) }
let!(:other_ref_pipeline) { create_pipeline(:locked, other_ref, :failed, tag: false) }
let!(:old_unlocked_pipeline) { create_pipeline(:unlocked, ref, :failed) }
let!(:old_locked_pipeline_1) { create_pipeline(:locked, ref, :failed) }
let!(:old_locked_pipeline_2) { create_pipeline(:locked, ref, :success) }
let!(:old_locked_pipeline_3) { create_pipeline(:locked, ref, :success) }
let!(:old_locked_pipeline_3_child) { create_pipeline(:locked, ref, :success, child_of: old_locked_pipeline_3) }
let!(:old_locked_pipeline_4) { create_pipeline(:locked, ref, :success) }
let!(:old_locked_pipeline_4_child) { create_pipeline(:locked, ref, :success, child_of: old_locked_pipeline_4) }
let!(:old_locked_pipeline_5) { create_pipeline(:locked, ref, :failed) }
let!(:old_locked_pipeline_5_child) { create_pipeline(:locked, ref, :success, child_of: old_locked_pipeline_5) }
let!(:pipeline) { create_pipeline(:locked, ref, :failed) }
let!(:child_pipeline) { create_pipeline(:locked, ref, :failed, child_of: pipeline) }
let!(:newer_pipeline) { create_pipeline(:locked, ref, :failed) }
context 'when before_pipeline is given' do
let(:before_pipeline) { pipeline }
it 'only enqueues old locked pipelines within the ref, excluding the last successful CI source pipeline' do
expect { execute }
.to change { pipeline_ids_waiting_to_be_unlocked }
.from([])
.to([
old_locked_pipeline_1.id,
old_locked_pipeline_2.id,
old_locked_pipeline_3.id,
old_locked_pipeline_3_child.id,
old_locked_pipeline_5.id,
old_locked_pipeline_5_child.id
])
expect(execute).to include(
status: :success,
total_pending_entries: 6,
total_new_entries: 6
)
end
end
context 'when before_pipeline is not given' do
let(:before_pipeline) { nil }
it 'enqueues all locked pipelines within the ref' do
expect { execute }
.to change { pipeline_ids_waiting_to_be_unlocked }
.from([])
.to([
old_locked_pipeline_1.id,
old_locked_pipeline_2.id,
old_locked_pipeline_3.id,
old_locked_pipeline_3_child.id,
old_locked_pipeline_4.id,
old_locked_pipeline_4_child.id,
old_locked_pipeline_5.id,
old_locked_pipeline_5_child.id,
pipeline.id,
child_pipeline.id,
newer_pipeline.id
])
expect(execute).to include(
status: :success,
total_pending_entries: 11,
total_new_entries: 11
)
end
end
end
context 'when ref is a tag' do
let(:target_ref) { ci_ref_tag }
it_behaves_like 'unlocking pipelines'
end
context 'when ref is a branch' do
let(:target_ref) { ci_ref_branch }
it_behaves_like 'unlocking pipelines'
end
def create_pipeline(type, ref, status, tag: is_tag, child_of: nil)
trait = type == :locked ? :artifacts_locked : :unlocked
create(:ci_pipeline, trait, status: status, ref: ref, tag: tag, project: project, child_of: child_of).tap do |p|
if child_of
build = create(:ci_build, pipeline: child_of)
create(:ci_sources_pipeline, source_job: build, source_project: project, pipeline: p, project: project)
end
end
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Queue
class PendingBuildsStrategy
attr_reader :runner
def initialize(runner)
@runner = runner
end
# rubocop:disable CodeReuse/ActiveRecord
def builds_for_shared_runner
shared_builds = builds_available_for_shared_runners
builds_ordered_for_shared_runners(shared_builds)
end
def builds_for_group_runner
return new_builds.none if runner.namespace_ids.empty?
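# `&&` is PostgreSQL's array-overlap operator: keep pending builds whose
# namespace ancestry (traversal IDs) intersects the runner's namespace IDs.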
new_builds_relation = new_builds.where('ci_pending_builds.namespace_traversal_ids && ARRAY[?]::int[]', runner.namespace_ids)
return order(new_builds_relation) if ::Feature.enabled?(:order_builds_for_group_runner)
new_builds_relation
end
def builds_matching_tag_ids(relation, ids)
relation.for_tags(runner.tags_ids)
end
def builds_with_any_tags(relation)
relation.where('cardinality(tag_ids) > 0')
end
def order(relation)
relation.order('build_id ASC')
end
def new_builds
::Ci::PendingBuild.all
end
def build_ids(relation)
relation.pluck(:build_id)
end
private
def builds_available_for_shared_runners
new_builds.with_instance_runners
end
def builds_ordered_for_shared_runners(relation)
if Feature.enabled?(:ci_queueing_disaster_recovery_disable_fair_scheduling, runner, type: :ops)
# if disaster recovery is enabled, we fallback to FIFO scheduling
relation.order('ci_pending_builds.build_id ASC')
else
# Implements Fair Scheduling
# Builds are ordered by projects that have the fewest running builds.
# This keeps projects that create many builds at once from hogging capacity but
# has the downside of penalizing projects with lots of builds created in a short period of time
relation
.with(running_builds_for_shared_runners_cte.to_arel)
.joins("LEFT JOIN project_builds ON ci_pending_builds.project_id = project_builds.project_id")
.order(Arel.sql('COALESCE(project_builds.running_builds, 0) ASC'), 'ci_pending_builds.build_id ASC')
end
end
def running_builds_for_shared_runners_cte
running_builds = ::Ci::RunningBuild
.instance_type
.group(:project_id)
.select(:project_id, 'COUNT(*) AS running_builds')
::Gitlab::SQL::CTE
.new(:project_builds, running_builds, materialized: true)
end
# rubocop:enable CodeReuse/ActiveRecord
end
end
end
Ci::Queue::PendingBuildsStrategy.prepend_mod_with('Ci::Queue::PendingBuildsStrategy')
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Queue::PendingBuildsStrategy, feature_category: :continuous_integration do
let_it_be(:group) { create(:group) }
let_it_be(:group_runner) { create(:ci_runner, :group, groups: [group]) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let!(:build_1) { create(:ci_build, :created, pipeline: pipeline) }
let!(:build_2) { create(:ci_build, :created, pipeline: pipeline) }
let!(:build_3) { create(:ci_build, :created, pipeline: pipeline) }
let!(:pending_build_1) { create(:ci_pending_build, build: build_2, project: project) }
let!(:pending_build_2) { create(:ci_pending_build, build: build_3, project: project) }
let!(:pending_build_3) { create(:ci_pending_build, build: build_1, project: project) }
describe 'builds_for_group_runner' do
it 'returns builds ordered by build ID' do
strategy = described_class.new(group_runner)
expect(strategy.builds_for_group_runner).to eq([pending_build_3, pending_build_1, pending_build_2])
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class UpdateRunnerService
attr_reader :runner
def initialize(runner)
@runner = runner
end
def execute(params)
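# The public params expose `paused`, but the model persists `active`, so invert it here.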
params[:active] = !params.delete(:paused) if params.include?(:paused)
if runner.update(params)
runner.tick_runner_queue
ServiceResponse.success
else
ServiceResponse.error(message: runner.errors.full_messages)
end
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Runners::UpdateRunnerService, '#execute', feature_category: :runner_fleet do
subject(:execute) { described_class.new(runner).execute(params) }
let(:runner) { create(:ci_runner) }
before do
allow(runner).to receive(:tick_runner_queue)
end
context 'with description params' do
let(:params) { { description: 'new runner' } }
it 'updates the runner and ticks the queue' do
expect(execute).to be_success
runner.reload
expect(runner).to have_received(:tick_runner_queue)
expect(runner.description).to eq('new runner')
end
end
context 'with paused param' do
let(:params) { { paused: true } }
it 'updates the runner and ticks the queue' do
expect(runner.active).to be_truthy
expect(execute).to be_success
runner.reload
expect(runner).to have_received(:tick_runner_queue)
expect(runner.active).to be_falsey
end
end
context 'with cost factor params' do
let(:params) { { public_projects_minutes_cost_factor: 1.1, private_projects_minutes_cost_factor: 2.2 } }
it 'updates the runner cost factors' do
expect(execute).to be_success
runner.reload
expect(runner.public_projects_minutes_cost_factor).to eq(1.1)
expect(runner.private_projects_minutes_cost_factor).to eq(2.2)
end
end
context 'when params are not valid' do
let(:params) { { run_untagged: false } }
it 'does not update and returns error because it is not valid' do
expect(execute).to be_error
runner.reload
expect(runner).not_to have_received(:tick_runner_queue)
expect(runner.run_untagged).to be_truthy
end
end
end
```
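As a worked illustration of the `paused`/`active` normalization tested above (a sketch, assuming an existing `Ci::Runner` record):

```ruby
runner = Ci::Runner.first # assumed to exist
service = Ci::Runners::UpdateRunnerService.new(runner)

# The service rewrites `paused: true` to `active: false` before calling runner.update.
response = service.execute({ paused: true })
response.success? # => true
runner.active     # => false
```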
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class SetRunnerAssociatedProjectsService
# @param [Ci::Runner] runner: the project runner to assign/unassign projects from
# @param [User] current_user: the user performing the operation
# @param [Array<Integer>] project_ids: the IDs of the associated projects to assign the runner to
def initialize(runner:, current_user:, project_ids:)
@runner = runner
@current_user = current_user
@project_ids = project_ids
end
def execute
unless current_user&.can?(:assign_runner, runner)
return ServiceResponse.error(message: 'user not allowed to assign runner', http_status: :forbidden)
end
return ServiceResponse.success if project_ids.nil?
set_associated_projects
end
private
def set_associated_projects
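# The runner's owner project is always kept in the association, regardless of project_ids.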
new_project_ids = [runner.owner_project.id] + project_ids
response = ServiceResponse.success
runner.transaction do
# rubocop:disable CodeReuse/ActiveRecord
current_project_ids = runner.projects.ids
# rubocop:enable CodeReuse/ActiveRecord
response = associate_new_projects(new_project_ids, current_project_ids)
response = disassociate_old_projects(new_project_ids, current_project_ids) if response.success?
raise ActiveRecord::Rollback, response.errors unless response.success?
end
response
end
def associate_new_projects(new_project_ids, current_project_ids)
missing_projects = Project.id_in(new_project_ids - current_project_ids)
unless missing_projects.all? { |project| current_user.can?(:register_project_runners, project) }
return ServiceResponse.error(message: 'user is not authorized to add runners to project')
end
unless missing_projects.all? { |project| runner.assign_to(project, current_user) }
return ServiceResponse.error(message: 'failed to assign projects to runner')
end
ServiceResponse.success
end
def disassociate_old_projects(new_project_ids, current_project_ids)
projects_to_be_deleted = current_project_ids - new_project_ids
return ServiceResponse.success if projects_to_be_deleted.empty?
all_destroyed =
Ci::RunnerProject
.destroy_by(project_id: projects_to_be_deleted)
.all?(&:destroyed?)
return ServiceResponse.success if all_destroyed
ServiceResponse.error(message: 'failed to destroy runner project')
end
attr_reader :runner, :current_user, :project_ids
end
end
end
Ci::Runners::SetRunnerAssociatedProjectsService.prepend_mod
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::SetRunnerAssociatedProjectsService, '#execute', feature_category: :runner_fleet do
subject(:execute) do
described_class.new(runner: runner, current_user: user, project_ids: new_projects.map(&:id)).execute
end
let_it_be(:owner_project) { create(:project) }
let_it_be(:project2) { create(:project) }
let(:original_projects) { [owner_project, project2] }
let(:runner) { create(:ci_runner, :project, projects: original_projects) }
context 'without user' do
let(:user) { nil }
let(:new_projects) { [project2] }
it 'does not call assign_to on runner and returns error response', :aggregate_failures do
expect(runner).not_to receive(:assign_to)
expect(execute).to be_error
expect(execute.message).to eq('user not allowed to assign runner')
end
end
context 'with unauthorized user' do
let(:user) { create(:user) }
let(:new_projects) { [project2] }
it 'does not call assign_to on runner and returns error message' do
expect(runner).not_to receive(:assign_to)
expect(execute).to be_error
expect(execute.message).to eq('user not allowed to assign runner')
end
end
context 'with authorized user' do
let_it_be(:project3) { create(:project) }
let_it_be(:project4) { create(:project) }
let(:projects_with_maintainer_access) { original_projects }
before do
projects_with_maintainer_access.each { |project| project.add_maintainer(user) }
end
shared_examples 'with successful requests' do
context 'when disassociating a project' do
let(:new_projects) { [project3, project4] }
it 'reassigns associated projects and returns success response' do
expect(execute).to be_success
runner.reload
expect(runner.owner_project).to eq(owner_project)
expect(runner.projects.ids).to match_array([owner_project.id] + new_projects.map(&:id))
end
end
context 'when disassociating no projects' do
let(:new_projects) { [project2, project3] }
it 'reassigns associated projects and returns success response' do
expect(execute).to be_success
runner.reload
expect(runner.owner_project).to eq(owner_project)
expect(runner.projects.ids).to match_array([owner_project.id] + new_projects.map(&:id))
end
end
context 'when disassociating all projects' do
let(:new_projects) { [] }
it 'reassigns associated projects and returns success response' do
expect(execute).to be_success
runner.reload
expect(runner.owner_project).to eq(owner_project)
expect(runner.projects.ids).to contain_exactly(owner_project.id)
end
end
end
shared_examples 'with failing destroy calls' do
let(:new_projects) { [project3, project4] }
it 'returns error response and rolls back transaction' do
allow_next_found_instance_of(Ci::RunnerProject) do |runner_project|
allow(runner_project).to receive(:destroy).and_return(false)
end
expect(execute).to be_error
expect(runner.reload.projects.order(:id)).to eq(original_projects)
end
end
context 'with maintainer user' do
let(:user) { create(:user) }
let(:projects_with_maintainer_access) { original_projects + new_projects }
it_behaves_like 'with successful requests'
it_behaves_like 'with failing destroy calls'
context 'when associating new projects' do
let(:new_projects) { [project3, project4] }
context 'with missing permissions on one of the new projects' do
let(:projects_with_maintainer_access) { original_projects + [project3] }
it 'returns error response and rolls back transaction' do
expect(execute).to be_error
expect(execute.errors).to contain_exactly('user is not authorized to add runners to project')
expect(runner.reload.projects.order(:id)).to eq(original_projects)
end
end
end
end
context 'with admin user', :enable_admin_mode do
let(:user) { create(:user, :admin) }
let(:projects_with_maintainer_access) { original_projects + new_projects }
it_behaves_like 'with successful requests'
it_behaves_like 'with failing destroy calls'
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class BulkDeleteRunnersService
attr_reader :runners
RUNNER_LIMIT = 50
# @param runners [Array<Ci::Runner>] the runners to unregister/destroy
# @param current_user [User] the user performing the operation
def initialize(runners:, current_user:)
@runners = runners
@current_user = current_user
end
def execute
if @runners
# Delete a few runners immediately
return delete_runners
end
ServiceResponse.success(payload: { deleted_count: 0, deleted_ids: [], errors: [] })
end
private
def delete_runners
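# Count one row beyond the limit so error_messages can tell whether runners were ignored.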
runner_count = @runners.limit(RUNNER_LIMIT + 1).count
authorized_runners_ids, unauthorized_runners_ids = compute_authorized_runners
# rubocop:disable CodeReuse/ActiveRecord
runners_to_be_deleted =
Ci::Runner
.where(id: authorized_runners_ids)
.preload([:taggings, :runner_namespaces, :runner_projects])
# rubocop:enable CodeReuse/ActiveRecord
deleted_ids = runners_to_be_deleted.destroy_all.map(&:id) # rubocop:disable Cop/DestroyAll
ServiceResponse.success(
payload: {
deleted_count: deleted_ids.count,
deleted_ids: deleted_ids,
errors: error_messages(runner_count, authorized_runners_ids, unauthorized_runners_ids)
})
end
def compute_authorized_runners
# rubocop:disable CodeReuse/ActiveRecord
@current_user.ci_owned_runners.load # preload the owned runners to avoid an N+1
authorized_runners, unauthorized_runners =
@runners.limit(RUNNER_LIMIT)
.partition { |runner| Ability.allowed?(@current_user, :delete_runner, runner) }
# rubocop:enable CodeReuse/ActiveRecord
[authorized_runners.map(&:id), unauthorized_runners.map(&:id)]
end
def error_messages(runner_count, authorized_runners_ids, unauthorized_runners_ids)
errors = []
if runner_count > RUNNER_LIMIT
errors << "Can only delete up to #{RUNNER_LIMIT} runners per call. Ignored the remaining runner(s)."
end
if authorized_runners_ids.empty?
errors << "User does not have permission to delete any of the runners"
elsif unauthorized_runners_ids.any?
failed_ids = unauthorized_runners_ids.map { |runner_id| "##{runner_id}" }.join(', ')
errors << "User does not have permission to delete runner(s) #{failed_ids}"
end
errors
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::BulkDeleteRunnersService, '#execute', feature_category: :runner_fleet do
subject(:execute) { described_class.new(**service_args).execute }
let_it_be(:admin_user) { create(:user, :admin) }
let_it_be_with_refind(:owner_user) { create(:user) } # discard memoized ci_owned_runners
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let(:user) {}
let(:service_args) { { runners: runners_arg, current_user: user } }
let(:runners_arg) {}
context 'with runners specified' do
let!(:instance_runner) { create(:ci_runner) }
let!(:group_runner) { create(:ci_runner, :group, groups: [group]) }
let!(:project_runner) { create(:ci_runner, :project, projects: [project]) }
shared_examples 'a service deleting runners in bulk' do
let!(:expected_deleted_ids) { expected_deleted_runners.map(&:id) }
it 'destroys runners', :aggregate_failures do
expect { execute }.to change { Ci::Runner.count }.by(-expected_deleted_ids.count)
is_expected.to be_success
expect(execute.payload).to eq(
{
deleted_count: expected_deleted_ids.count,
deleted_ids: expected_deleted_ids,
errors: []
})
expect { project_runner.runner_projects.first.reload }.to raise_error(ActiveRecord::RecordNotFound)
expected_deleted_runners.each do |deleted_runner|
expect(deleted_runner[:errors]).to be_nil
expect { deleted_runner.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
end
context 'with too many runners specified' do
before do
stub_const("#{described_class}::RUNNER_LIMIT", 1)
end
it 'deletes only first RUNNER_LIMIT runners', :aggregate_failures do
expect { execute }.to change { Ci::Runner.count }.by(-1)
is_expected.to be_success
expect(execute.payload).to eq(
{
deleted_count: 1,
deleted_ids: expected_deleted_ids.take(1),
errors: ["Can only delete up to 1 runners per call. Ignored the remaining runner(s)."]
})
end
end
end
context 'when the user cannot delete runners' do
let(:runners_arg) { Ci::Runner.all }
context 'when user is not group owner' do
before do
group.add_developer(user)
end
let(:user) { create(:user) }
it 'does not delete any runner and returns error', :aggregate_failures do
expect { execute }.not_to change { Ci::Runner.count }
expect(execute[:errors]).to match_array("User does not have permission to delete any of the runners")
end
end
context 'when user is not part of the group' do
let(:user) { create(:user) }
it 'does not delete any runner and returns error', :aggregate_failures do
expect { execute }.not_to change { Ci::Runner.count }
expect(execute[:errors]).to match_array("User does not have permission to delete any of the runners")
end
end
end
context 'when the user can delete runners' do
context 'when user is an admin', :enable_admin_mode do
include_examples 'a service deleting runners in bulk' do
let(:runners_arg) { Ci::Runner.all }
let!(:expected_deleted_runners) { [instance_runner, group_runner, project_runner] }
let(:user) { admin_user }
end
context 'with a runner already deleted' do
before do
group_runner.destroy!
end
include_examples 'a service deleting runners in bulk' do
let(:runners_arg) { Ci::Runner.all }
let!(:expected_deleted_runners) { [instance_runner, project_runner] }
let(:user) { admin_user }
end
end
context 'when deleting a single runner' do
let(:runners_arg) { Ci::Runner.all }
it 'avoids N+1 cached queries', :use_sql_query_cache, :request_store do
# Run this once to establish a baseline
control_count = ActiveRecord::QueryRecorder.new(skip_cached: false) do
execute
end
additional_runners = 1
create_list(:ci_runner, 1 + additional_runners, :instance)
create_list(:ci_runner, 1 + additional_runners, :group, groups: [group])
create_list(:ci_runner, 1 + additional_runners, :project, projects: [project])
service = described_class.new(runners: runners_arg, current_user: user)
# Base cost per runner is:
# - 1 `SELECT * FROM "taggings"` query
# - 1 `SAVEPOINT` query
# - 1 `DELETE FROM "ci_runners"` query
# - 1 `RELEASE SAVEPOINT` query
# Project runners have an additional query:
# - 1 `DELETE FROM "ci_runner_projects"` query, given the call to `destroy_all`
instance_runner_cost = 4
group_runner_cost = 4
project_runner_cost = 5
expect { service.execute }
.not_to exceed_all_query_limit(control_count)
.with_threshold(additional_runners * (instance_runner_cost + group_runner_cost + project_runner_cost))
end
end
end
context 'when user is group owner' do
before do
group.add_owner(user)
end
include_examples 'a service deleting runners in bulk' do
let(:runners_arg) { Ci::Runner.not_instance_type }
let!(:expected_deleted_runners) { [group_runner, project_runner] }
let(:user) { owner_user }
end
context 'with a runner non-authorised to be deleted' do
let(:runners_arg) { Ci::Runner.all }
let!(:expected_deleted_runners) { [project_runner] }
let(:user) { owner_user }
it 'destroys only authorised runners', :aggregate_failures do
allow(Ability).to receive(:allowed?).and_call_original
expect(Ability).to receive(:allowed?).with(user, :delete_runner, instance_runner).and_return(false)
expect { execute }.to change { Ci::Runner.count }.by(-2)
is_expected.to be_success
expect(execute.payload).to eq(
{
deleted_count: 2,
deleted_ids: [group_runner.id, project_runner.id],
errors: ["User does not have permission to delete runner(s) ##{instance_runner.id}"]
})
end
end
end
end
context 'with no arguments specified' do
let(:runners_arg) { nil }
let(:user) { owner_user }
it 'returns 0 deleted runners' do
is_expected.to be_success
expect(execute.payload).to eq({ deleted_count: 0, deleted_ids: [], errors: [] })
end
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class UnregisterRunnerManagerService
attr_reader :runner, :author, :system_id
# @param [Ci::Runner] runner the runner to unregister/destroy
# @param [User, authentication token String] author the user or the authentication token authorizing the removal
# @param [String] system_id ID of the system being unregistered
def initialize(runner, author, system_id:)
@runner = runner
@author = author
@system_id = system_id
end
def execute
return system_id_missing_error if system_id.blank?
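# find_by_system_xid! raises ActiveRecord::RecordNotFound when the system ID is unknown.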
runner_manager = runner.runner_managers.find_by_system_xid!(system_id)
runner_manager.destroy!
ServiceResponse.success
end
private
def system_id_missing_error
ServiceResponse.error(message: '`system_id` needs to be specified for runners created in the UI.')
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::UnregisterRunnerManagerService, '#execute', feature_category: :runner_fleet do
subject(:execute) { described_class.new(runner, 'some_token', system_id: system_id).execute }
context 'with runner registered with registration token' do
let!(:runner) { create(:ci_runner, registration_type: :registration_token) }
let(:system_id) { nil }
it 'does not destroy runner or runner managers' do
expect do
expect(execute).to be_error
end.to not_change { Ci::Runner.count }
.and not_change { Ci::RunnerManager.count }
expect(runner[:errors]).to be_nil
end
end
context 'with runner created in UI' do
let!(:runner_manager1) { create(:ci_runner_machine, runner: runner, system_xid: 'system_id_1') }
let!(:runner_manager2) { create(:ci_runner_machine, runner: runner, system_xid: 'system_id_2') }
let!(:runner) { create(:ci_runner, registration_type: :authenticated_user) }
context 'with system_id specified' do
let(:system_id) { runner_manager1.system_xid }
it 'destroys runner_manager1 and leaves runner', :aggregate_failures do
expect do
expect(execute).to be_success
end.to change { Ci::RunnerManager.count }.by(-1)
.and not_change { Ci::Runner.count }
expect(runner[:errors]).to be_nil
expect(runner.runner_managers).to contain_exactly(runner_manager2)
end
end
context 'with unknown system_id' do
let(:system_id) { 'unknown_system_id' }
it 'raises RecordNotFound error', :aggregate_failures do
expect do
execute
end.to raise_error(ActiveRecord::RecordNotFound)
.and not_change { Ci::Runner.count }
.and not_change { Ci::RunnerManager.count }
end
end
context 'with system_id missing' do
let(:system_id) { nil }
it 'returns error and leaves runner_manager1', :aggregate_failures do
expect do
expect(execute).to be_error
expect(execute.message).to eq('`system_id` needs to be specified for runners created in the UI.')
end.to not_change { Ci::Runner.count }
.and not_change { Ci::RunnerManager.count }
end
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class UnassignRunnerService
# @param [Ci::RunnerProject] runner_project the runner/project association to destroy
# @param [User] user the user performing the operation
def initialize(runner_project, user)
@runner_project = runner_project
@runner = runner_project.runner
@project = runner_project.project
@user = user
end
def execute
unless @user.present? && @user.can?(:assign_runner, @runner)
return ServiceResponse.error(message: 'user not allowed to assign runner')
end
if @runner_project.destroy
ServiceResponse.success
else
ServiceResponse.error(message: 'failed to destroy runner project')
end
end
private
attr_reader :runner, :project, :user
end
end
end
Ci::Runners::UnassignRunnerService.prepend_mod
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::UnassignRunnerService, '#execute', feature_category: :runner_fleet do
let_it_be(:project) { create(:project) }
let_it_be(:runner) { create(:ci_runner, :project, projects: [project]) }
let(:runner_project) { runner.runner_projects.last }
subject(:execute) { described_class.new(runner_project, user).execute }
context 'without user' do
let(:user) { nil }
it 'does not destroy runner_project', :aggregate_failures do
expect(runner_project).not_to receive(:destroy)
expect { execute }.not_to change { runner.runner_projects.count }.from(1)
is_expected.to be_error
end
end
context 'with unauthorized user' do
let(:user) { build(:user) }
it 'does not call destroy on runner_project' do
expect(runner).not_to receive(:destroy)
is_expected.to be_error
end
end
context 'with admin user', :enable_admin_mode do
let(:user) { create_default(:user, :admin) }
context 'with destroy returning false' do
it 'returns error response' do
expect(runner_project).to receive(:destroy).once.and_return(false)
is_expected.to be_error
end
end
context 'with destroy returning true' do
it 'returns success response' do
expect(runner_project).to receive(:destroy).once.and_return(true)
is_expected.to be_success
end
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class CreateRunnerService
RUNNER_CLASS_MAPPING = {
'instance_type' => Ci::Runners::RunnerCreationStrategies::InstanceRunnerStrategy,
'group_type' => Ci::Runners::RunnerCreationStrategies::GroupRunnerStrategy,
'project_type' => Ci::Runners::RunnerCreationStrategies::ProjectRunnerStrategy
}.freeze
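# Maps the requested runner_type to the strategy that normalizes params and checks authorization.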
def initialize(user:, params:)
@user = user
@params = params
@strategy = RUNNER_CLASS_MAPPING[params[:runner_type]].new(user: user, params: params)
end
def execute
normalize_params
error = strategy.validate_params
return ServiceResponse.error(message: error, reason: :validation_error) if error
unless strategy.authorized_user?
return ServiceResponse.error(message: _('Insufficient permissions'), reason: :forbidden)
end
runner = ::Ci::Runner.new(params)
return ServiceResponse.success(payload: { runner: runner }) if runner.save
ServiceResponse.error(message: runner.errors.full_messages, reason: :save_error)
end
def normalize_params
params[:registration_type] = :authenticated_user
params[:active] = !params.delete(:paused) if params.key?(:paused)
params[:creator] = user
strategy.normalize_params
end
private
attr_reader :user, :params, :strategy
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::CreateRunnerService, "#execute", feature_category: :runner_fleet do
subject(:execute) { described_class.new(user: current_user, params: params).execute }
let(:runner) { execute.payload[:runner] }
let_it_be(:admin) { create(:admin) }
let_it_be(:non_admin_user) { create(:user) }
let_it_be(:anonymous) { nil }
let_it_be(:group_owner) { create(:user) }
let_it_be(:group) { create(:group) }
shared_examples 'it can create a runner' do
it 'creates a runner of the specified type', :aggregate_failures do
is_expected.to be_success
expect(runner.runner_type).to eq expected_type
end
context 'with default params provided' do
let(:args) do
{}
end
before do
params.merge!(args)
end
it { is_expected.to be_success }
it 'uses default values when none are provided' do
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.persisted?).to be_truthy
expect(runner.run_untagged).to be true
expect(runner.active).to be true
expect(runner.creator).to be current_user
expect(runner.authenticated_user_registration_type?).to be_truthy
expect(runner.runner_type).to eq expected_type
end
end
context 'with non-default params provided' do
let(:args) do
{
description: 'some description',
maintenance_note: 'a note',
paused: true,
tag_list: %w[tag1 tag2],
access_level: 'ref_protected',
locked: true,
maximum_timeout: 600,
run_untagged: false
}
end
before do
params.merge!(args)
end
it { is_expected.to be_success }
it 'creates runner with specified values', :aggregate_failures do
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.description).to eq 'some description'
expect(runner.maintenance_note).to eq 'a note'
expect(runner.active).to eq !args[:paused]
expect(runner.locked).to eq args[:locked]
expect(runner.run_untagged).to eq args[:run_untagged]
expect(runner.tags).to contain_exactly(
an_object_having_attributes(name: 'tag1'),
an_object_having_attributes(name: 'tag2')
)
expect(runner.access_level).to eq args[:access_level]
expect(runner.maximum_timeout).to eq args[:maximum_timeout]
expect(runner.authenticated_user_registration_type?).to be_truthy
expect(runner.runner_type).to eq expected_type
end
context 'with a nil paused value' do
let(:args) do
{
paused: nil,
description: 'some description',
maintenance_note: 'a note',
tag_list: %w[tag1 tag2],
access_level: 'ref_protected',
locked: true,
maximum_timeout: 600,
run_untagged: false
}
end
it { is_expected.to be_success }
it 'creates runner with active set to true' do
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.active).to eq true
end
end
context 'with no paused value given' do
let(:args) do
{
description: 'some description',
maintenance_note: 'a note',
tag_list: %w[tag1 tag2],
access_level: 'ref_protected',
locked: true,
maximum_timeout: 600,
run_untagged: false
}
end
it { is_expected.to be_success }
it 'creates runner with active set to true' do
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.active).to eq true
end
end
end
end
shared_examples 'it cannot create a runner' do
it 'runner payload is nil' do
expect(runner).to be nil
end
it { is_expected.to be_error }
end
shared_examples 'it can return an error' do
let(:runner_double) { Ci::Runner.new }
context 'when the runner fails to save' do
before do
allow(Ci::Runner).to receive(:new).and_return runner_double
end
it_behaves_like 'it cannot create a runner'
it 'returns error message' do
expect(execute.errors).not_to be_empty
end
end
end
context 'with :runner_type param set to instance_type' do
let(:expected_type) { 'instance_type' }
let(:params) { { runner_type: 'instance_type' } }
context 'when anonymous user' do
let(:current_user) { anonymous }
it_behaves_like 'it cannot create a runner'
end
context 'when non-admin user' do
let(:current_user) { non_admin_user }
it_behaves_like 'it cannot create a runner'
end
context 'when admin user' do
let(:current_user) { admin }
it_behaves_like 'it cannot create a runner'
context 'when admin mode is enabled', :enable_admin_mode do
it_behaves_like 'it can create a runner'
it_behaves_like 'it can return an error'
context 'with unexpected scope param specified' do
let(:params) { { runner_type: 'instance_type', scope: group } }
it_behaves_like 'it cannot create a runner'
end
context 'when model validation fails' do
let(:params) { { runner_type: 'instance_type', run_untagged: false, tag_list: [] } }
it_behaves_like 'it cannot create a runner'
it 'returns error message and reason', :aggregate_failures do
expect(execute.reason).to eq(:save_error)
expect(execute.message).to contain_exactly(a_string_including('Tags list can not be empty'))
end
end
end
end
end
context 'with :runner_type param set to group_type' do
let(:expected_type) { 'group_type' }
let(:params) { { runner_type: 'group_type', scope: group } }
before do
group.add_developer(non_admin_user)
group.add_owner(group_owner)
end
context 'when anonymous user' do
let(:current_user) { anonymous }
it_behaves_like 'it cannot create a runner'
end
context 'when non-admin user' do
let(:current_user) { non_admin_user }
it_behaves_like 'it cannot create a runner'
end
context 'when group owner' do
let(:current_user) { group_owner }
it_behaves_like 'it can create a runner'
context 'with missing scope param' do
let(:params) { { runner_type: 'group_type' } }
it_behaves_like 'it cannot create a runner'
end
end
context 'when admin user' do
let(:current_user) { admin }
it_behaves_like 'it cannot create a runner'
context 'when admin mode is enabled', :enable_admin_mode do
it_behaves_like 'it can create a runner'
it_behaves_like 'it can return an error'
end
end
end
context 'with :runner_type param set to project_type' do
let_it_be(:project) { create(:project, namespace: group) }
let(:expected_type) { 'project_type' }
let(:params) { { runner_type: 'project_type', scope: project } }
before do
group.add_developer(non_admin_user)
group.add_owner(group_owner)
end
context 'when anonymous user' do
let(:current_user) { anonymous }
it_behaves_like 'it cannot create a runner'
end
context 'when group owner' do
let(:current_user) { group_owner }
it_behaves_like 'it can create a runner'
context 'with missing scope param' do
let(:params) { { runner_type: 'project_type' } }
it_behaves_like 'it cannot create a runner'
end
end
context 'when non-admin user' do
let(:current_user) { non_admin_user }
it_behaves_like 'it cannot create a runner'
context 'with project permissions to create runner' do
before do
project.add_maintainer(current_user)
end
it_behaves_like 'it can create a runner'
end
end
context 'when admin user' do
let(:current_user) { admin }
it_behaves_like 'it cannot create a runner'
context 'when admin mode is enabled', :enable_admin_mode do
it_behaves_like 'it can create a runner'
it_behaves_like 'it can return an error'
end
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class AssignRunnerService
# @param [Ci::Runner] runner: the runner to assign to a project
# @param [Project] project: the new project to assign the runner to
# @param [User] user: the user performing the operation
def initialize(runner, project, user)
@runner = runner
@project = project
@user = user
end
def execute
unless @user.present? && @user.can?(:assign_runner, @runner)
return ServiceResponse.error(message: 'user not allowed to assign runner', http_status: :forbidden)
end
unless @user.can?(:register_project_runners, @project)
return ServiceResponse.error(message: 'user not allowed to add runners to project', http_status: :forbidden)
end
if @runner.assign_to(@project, @user)
ServiceResponse.success
else
ServiceResponse.error(message: 'failed to assign runner')
end
end
private
attr_reader :runner, :project, :user
end
end
end
Ci::Runners::AssignRunnerService.prepend_mod
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::AssignRunnerService, '#execute', feature_category: :runner_fleet do
subject(:execute) { described_class.new(runner, new_project, user).execute }
let_it_be(:owner_group) { create(:group) }
let_it_be(:owner_project) { create(:project, group: owner_group) }
let_it_be(:new_project) { create(:project) }
let_it_be(:runner) { create(:ci_runner, :project, projects: [owner_project]) }
context 'without user' do
let(:user) { nil }
it 'does not call assign_to on runner and returns error response', :aggregate_failures do
expect(runner).not_to receive(:assign_to)
is_expected.to be_error
expect(execute.message).to eq('user not allowed to assign runner')
end
end
context 'with unauthorized user' do
let(:user) { build(:user) }
it 'does not call assign_to on runner and returns error message' do
expect(runner).not_to receive(:assign_to)
is_expected.to be_error
expect(execute.message).to eq('user not allowed to assign runner')
end
end
context 'with authorized user' do
let(:user) { create(:user) }
context 'with user owning runner and being maintainer of new project' do
before do
owner_project.group.add_owner(user)
new_project.add_maintainer(user)
end
it 'calls assign_to on runner and returns success response' do
expect(runner).to receive(:assign_to).with(new_project, user).once.and_call_original
is_expected.to be_success
end
end
context 'with user owning runner' do
before do
owner_project.add_maintainer(user)
end
it 'does not call assign_to on runner and returns error message', :aggregate_failures do
expect(runner).not_to receive(:assign_to)
is_expected.to be_error
expect(execute.message).to eq('user not allowed to add runners to project')
end
end
context 'with user being maintainer of new project', :aggregate_failures do
before do
new_project.add_maintainer(user)
end
it 'does not call assign_to on runner and returns error message' do
expect(runner).not_to receive(:assign_to)
is_expected.to be_error
expect(execute.message).to eq('user not allowed to assign runner')
end
end
end
context 'with admin user', :enable_admin_mode do
let(:user) { create(:user, :admin) }
it 'calls assign_to on runner and returns success response' do
expect(runner).to receive(:assign_to).with(new_project, user).once.and_call_original
is_expected.to be_success
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class ReconcileExistingRunnerVersionsService
VERSION_BATCH_SIZE = 100
def execute
insert_result = insert_runner_versions
total_deleted = cleanup_runner_versions(insert_result[:versions_from_runners])
total_updated = update_status_on_outdated_runner_versions(insert_result[:versions_from_runners])
ServiceResponse.success(payload: {
total_inserted: insert_result[:new_record_count],
total_updated: total_updated,
total_deleted: total_deleted
})
end
private
def upgrade_check
@runner_upgrade_check ||= Gitlab::Ci::RunnerUpgradeCheck.new(::Gitlab::VERSION)
end
# rubocop: disable CodeReuse/ActiveRecord
def insert_runner_versions
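# Walk distinct runner versions in batches, inserting any version strings
# not yet present in ci_runner_versions.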
versions_from_runners = Set[]
new_record_count = 0
Ci::Runner.distinct_each_batch(column: :version, of: VERSION_BATCH_SIZE) do |version_batch|
batch_versions = version_batch.pluck(:version).to_set
versions_from_runners += batch_versions
# Avoid hitting primary DB
already_existing_versions = Ci::RunnerVersion.where(version: batch_versions).pluck(:version)
new_versions = batch_versions - already_existing_versions
if new_versions.any?
new_record_count += Ci::RunnerVersion.insert_all(
new_versions.map { |v| { version: v } },
returning: :version,
unique_by: :version).count
end
end
{ versions_from_runners: versions_from_runners, new_record_count: new_record_count }
end
def cleanup_runner_versions(versions_from_runners)
Ci::RunnerVersion.where.not(version: versions_from_runners).delete_all
end
# rubocop: enable CodeReuse/ActiveRecord
def outdated_runner_versions
Ci::RunnerVersion.potentially_outdated
end
def update_status_on_outdated_runner_versions(versions_from_runners)
total_updated = 0
outdated_runner_versions.each_batch(of: VERSION_BATCH_SIZE) do |version_batch|
updated = version_batch
.select { |runner_version| versions_from_runners.include?(runner_version['version']) }
.filter_map { |runner_version| runner_version_with_updated_status(runner_version) }
if updated.any?
total_updated += Ci::RunnerVersion.upsert_all(updated, unique_by: :version).count
end
end
total_updated
end
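# Builds an upsert attributes hash only when the suggested status is valid (not :error)
# and differs from the stored status; nil results are dropped by filter_map above.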
def runner_version_with_updated_status(runner_version)
_, new_status = upgrade_check.check_runner_upgrade_suggestion(runner_version.version)
if new_status != :error && new_status != runner_version.status.to_sym
{
version: runner_version.version,
status: Ci::RunnerVersion.statuses[new_status]
}
end
end
end
end
end
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::ReconcileExistingRunnerVersionsService, '#execute', feature_category: :runner_fleet do
include RunnerReleasesHelper
subject(:execute) { described_class.new.execute }
let_it_be(:runner_14_0_1) { create(:ci_runner, version: '14.0.1') }
let_it_be(:runner_version_14_0_1) do
create(:ci_runner_version, version: '14.0.1', status: :unavailable)
end
context 'with RunnerUpgradeCheck recommending 14.0.2' do
let(:upgrade_check) { instance_double(::Gitlab::Ci::RunnerUpgradeCheck) }
before do
stub_const('Ci::Runners::ReconcileExistingRunnerVersionsService::VERSION_BATCH_SIZE', 1)
allow(::Gitlab::Ci::RunnerUpgradeCheck).to receive(:new).and_return(upgrade_check).once
end
context 'with runner with new version' do
let!(:runner_14_0_2) { create(:ci_runner, version: '14.0.2') }
let!(:runner_14_0_0) { create(:ci_runner, version: '14.0.0') }
let!(:runner_version_14_0_0) do
create(:ci_runner_version, version: '14.0.0', status: :unavailable)
end
before do
allow(upgrade_check).to receive(:check_runner_upgrade_suggestion)
.and_return([::Gitlab::VersionInfo.new(14, 0, 2), :recommended])
allow(upgrade_check).to receive(:check_runner_upgrade_suggestion)
.with('14.0.2')
.and_return([::Gitlab::VersionInfo.new(14, 0, 2), :unavailable])
.once
end
it 'creates and updates expected ci_runner_versions entries', :aggregate_failures do
expect(Ci::RunnerVersion).to receive(:insert_all)
.ordered
.with([{ version: '14.0.2' }], anything)
.once
.and_call_original
expect { execute }
.to change { runner_version_14_0_0.reload.status }.from('unavailable').to('recommended')
.and change { runner_version_14_0_1.reload.status }.from('unavailable').to('recommended')
.and change { ::Ci::RunnerVersion.find_by(version: '14.0.2')&.status }.from(nil).to('unavailable')
expect(execute).to be_success
expect(execute.payload).to eq({
total_inserted: 1, # 14.0.2 is inserted
total_updated: 3, # 14.0.0, 14.0.1 are updated, and newly inserted 14.0.2's status is calculated
total_deleted: 0
})
end
end
context 'with orphan ci_runner_version' do
let!(:runner_version_14_0_2) do
create(:ci_runner_version, version: '14.0.2', status: :unavailable)
end
before do
allow(upgrade_check).to receive(:check_runner_upgrade_suggestion)
.and_return([::Gitlab::VersionInfo.new(14, 0, 2), :unavailable])
end
it 'deletes orphan ci_runner_versions entry', :aggregate_failures do
expect { execute }
.to change { ::Ci::RunnerVersion.find_by_version('14.0.2')&.status }.from('unavailable').to(nil)
.and not_change { runner_version_14_0_1.reload.status }.from('unavailable')
expect(execute).to be_success
expect(execute.payload).to eq({
total_inserted: 0,
total_updated: 0,
total_deleted: 1 # 14.0.2 is deleted
})
end
end
context 'with no runner version changes' do
before do
allow(upgrade_check).to receive(:check_runner_upgrade_suggestion)
.and_return([::Gitlab::VersionInfo.new(14, 0, 1), :unavailable])
end
it 'does not modify ci_runner_versions entries', :aggregate_failures do
expect { execute }.not_to change { runner_version_14_0_1.reload.status }.from('unavailable')
expect(execute).to be_success
expect(execute.payload).to eq({
total_inserted: 0,
total_updated: 0,
total_deleted: 0
})
end
end
context 'with failing version check' do
before do
allow(upgrade_check).to receive(:check_runner_upgrade_suggestion)
.and_return([::Gitlab::VersionInfo.new(14, 0, 1), :error])
end
it 'makes no changes to ci_runner_versions', :aggregate_failures do
expect { execute }.not_to change { runner_version_14_0_1.reload.status }.from('unavailable')
expect(execute).to be_success
expect(execute.payload).to eq({
total_inserted: 0,
total_updated: 0,
total_deleted: 0
})
end
end
end
context 'integration testing with Gitlab::Ci::RunnerUpgradeCheck' do
before do
stub_runner_releases(%w[14.0.0 14.0.1])
end
it 'does not modify ci_runner_versions entries', :aggregate_failures do
expect { execute }.not_to change { runner_version_14_0_1.reload.status }.from('unavailable')
expect(execute).to be_success
expect(execute.payload).to eq({
total_inserted: 0,
total_updated: 0,
total_deleted: 0
})
end
end
end
```
Write an RSpec test file for the following Ruby class:
```ruby
# frozen_string_literal: true
module Ci
module Runners
class ResetRegistrationTokenService
# @param [ApplicationSetting, Project, Group] scope: the scope of the reset operation
# @param [User] user: the user performing the operation
def initialize(scope, user)
@scope = scope
@user = user
end
def execute
unless @user.present? && @user.can?(:update_runners_registration_token, scope)
return ServiceResponse.error(message: 'user not allowed to update runners registration token')
end
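# ApplicationSetting exposes runners_registration_token; Project and Group expose runners_token.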
if scope.respond_to?(:runners_registration_token)
scope.reset_runners_registration_token!
runners_token = scope.runners_registration_token
else
scope.reset_runners_token!
runners_token = scope.runners_token
end
ServiceResponse.success(payload: { new_registration_token: runners_token })
end
private
attr_reader :scope, :user
end
end
end
Ci::Runners::ResetRegistrationTokenService.prepend_mod
```

```ruby
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::ResetRegistrationTokenService, '#execute', feature_category: :runner_fleet do
subject(:execute) { described_class.new(scope, current_user).execute }
let_it_be(:user) { build(:user) }
let_it_be(:admin_user) { create(:user, :admin) }
shared_examples 'a registration token reset operation' do
context 'without user' do
let(:current_user) { nil }
it 'does not reset registration token and returns error response' do
expect(scope).not_to receive(token_reset_method_name)
expect(execute).to be_error
end
end
context 'with unauthorized user' do
let(:current_user) { user }
it 'does not reset registration token and returns error response' do
expect(scope).not_to receive(token_reset_method_name)
expect(execute).to be_error
end
end
context 'with admin user', :enable_admin_mode do
let(:current_user) { admin_user }
it 'resets registration token and returns value unchanged' do
expect(scope).to receive(token_reset_method_name).once do
expect(scope).to receive(token_method_name).once.and_return("#{token_method_name} return value")
end
expect(execute).to be_success
expect(execute.payload[:new_registration_token]).to eq("#{token_method_name} return value")
end
end
end
context 'with instance scope' do
let_it_be(:scope) { create(:application_setting) }
before do
allow(ApplicationSetting).to receive(:current).and_return(scope)
allow(ApplicationSetting).to receive(:current_without_cache).and_return(scope)
end
it_behaves_like 'a registration token reset operation' do
let(:token_method_name) { :runners_registration_token }
let(:token_reset_method_name) { :reset_runners_registration_token! }
end
end
context 'with group scope' do
let_it_be(:scope) { create(:group) }
it_behaves_like 'a registration token reset operation' do
let(:token_method_name) { :runners_token }
let(:token_reset_method_name) { :reset_runners_token! }
end
end
context 'with project scope' do
let_it_be(:scope) { create(:project) }
it_behaves_like 'a registration token reset operation' do
let(:token_method_name) { :runners_token }
let(:token_reset_method_name) { :reset_runners_token! }
end
end
end
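# A minimal usage sketch, assuming `group` is an existing Group and `admin` is a
# user allowed to update the runners registration token:
#
#   response = ::Ci::Runners::ResetRegistrationTokenService.new(group, admin).execute
#   response.success?                          # => true
#   response.payload[:new_registration_token]  # => the rotated runners token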
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module Runners
class UnregisterRunnerService
attr_reader :runner, :author
# @param [Ci::Runner] runner the runner to unregister/destroy
# @param [User, authentication token String] author the user or the authentication token that authorizes the removal
def initialize(runner, author)
@runner = runner
@author = author
end
def execute
runner.destroy!
ServiceResponse.success
end
end
end
end
Ci::Runners::UnregisterRunnerService.prepend_mod
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::UnregisterRunnerService, '#execute', feature_category: :runner_fleet do
subject(:execute) { described_class.new(runner, 'some_token').execute }
let(:runner) { create(:ci_runner) }
it 'destroys runner' do
expect(runner).to receive(:destroy).once.and_call_original
expect do
expect(execute).to be_success
end.to change { Ci::Runner.count }.by(-1)
expect(runner[:errors]).to be_nil
end
end
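# A minimal usage sketch, assuming `runner` is a persisted Ci::Runner; the author
# may be a User or the authenticating token string:
#
#   response = ::Ci::Runners::UnregisterRunnerService.new(runner, runner.token).execute
#   response.success? # => true; the runner record has been destroyed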
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module Runners
class StaleManagersCleanupService
MAX_DELETIONS = 1000
SUB_BATCH_LIMIT = 100
def execute
ServiceResponse.success(payload: delete_stale_runner_managers)
end
private
def delete_stale_runner_managers
batch_counts = []
total_deleted_count = 0
loop do
sub_batch_limit = [SUB_BATCH_LIMIT, MAX_DELETIONS].min
# delete_all discards part of the `stale` scope query, so we explicitly wrap it with a SELECT as a workaround
deleted_count = Ci::RunnerManager.id_in(Ci::RunnerManager.stale.limit(sub_batch_limit)).delete_all
batch_counts << deleted_count
total_deleted_count += deleted_count
break if deleted_count == 0 || total_deleted_count >= MAX_DELETIONS
end
{
total_deleted: total_deleted_count,
batch_counts: batch_counts
}
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Runners::StaleManagersCleanupService, feature_category: :runner_fleet do
let(:service) { described_class.new }
let!(:runner_manager3) { create(:ci_runner_machine, created_at: 6.months.ago, contacted_at: Time.current) }
subject(:response) { service.execute }
context 'with no stale runner managers' do
it 'does not clean any runner managers and returns :success status' do
expect do
expect(response).to be_success
expect(response.payload).to match({ total_deleted: 0, batch_counts: [0] })
end.not_to change { Ci::RunnerManager.count }.from(1)
end
end
context 'with some stale runner managers' do
before do
create(:ci_runner_machine, :stale)
create(:ci_runner_machine, :stale, contacted_at: nil)
end
    it 'only leaves non-stale runner managers' do
expect(response).to be_success
expect(response.payload).to match({ total_deleted: 2, batch_counts: [2, 0] })
expect(Ci::RunnerManager.all).to contain_exactly(runner_manager3)
end
context 'with more stale runners than SUB_BATCH_LIMIT' do
before do
stub_const("#{described_class}::SUB_BATCH_LIMIT", 1)
end
      it 'only leaves non-stale runner managers' do
expect(response).to be_success
expect(response.payload).to match({ total_deleted: 2, batch_counts: [1, 1, 0] })
expect(Ci::RunnerManager.all).to contain_exactly(runner_manager3)
end
end
context 'with more stale runners than MAX_DELETIONS' do
before do
stub_const("#{described_class}::MAX_DELETIONS", 1)
end
      it 'only leaves non-stale runner managers' do
expect do
expect(response).to be_success
expect(response.payload).to match({
total_deleted: Ci::Runners::StaleManagersCleanupService::MAX_DELETIONS,
batch_counts: [1]
})
end.to change { Ci::RunnerManager.count }.by(-Ci::Runners::StaleManagersCleanupService::MAX_DELETIONS)
end
end
end
end
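# A minimal usage sketch; each call deletes at most MAX_DELETIONS stale runner
# managers, in sub-batches of up to SUB_BATCH_LIMIT rows:
#
#   response = Ci::Runners::StaleManagersCleanupService.new.execute
#   response.payload # => e.g. { total_deleted: 2, batch_counts: [2, 0] }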
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module Runners
class RegisterRunnerService
include Gitlab::Utils::StrongMemoize
def initialize(registration_token, attributes)
@registration_token = registration_token
@attributes = attributes
end
def execute
return ServiceResponse.error(message: 'invalid token supplied', http_status: :forbidden) unless attrs_from_token
unless registration_token_allowed?(attrs_from_token)
return ServiceResponse.error(
message: 'runner registration disallowed',
reason: :runner_registration_disallowed)
end
runner = ::Ci::Runner.new(attributes.merge(attrs_from_token))
Ci::BulkInsertableTags.with_bulk_insert_tags do
Ci::Runner.transaction do
if runner.save
Gitlab::Ci::Tags::BulkInsert.bulk_insert_tags!([runner])
else
raise ActiveRecord::Rollback
end
end
end
ServiceResponse.success(payload: { runner: runner })
end
private
attr_reader :registration_token, :attributes
def attrs_from_token
if runner_registration_token_valid?(registration_token)
# Create shared runner. Requires admin access
{ runner_type: :instance_type }
elsif runner_registrar_valid?('project') && project = ::Project.find_by_runners_token(registration_token)
# Create a project runner
{ runner_type: :project_type, projects: [project] }
elsif runner_registrar_valid?('group') && group = ::Group.find_by_runners_token(registration_token)
# Create a group runner
{ runner_type: :group_type, groups: [group] }
end
end
strong_memoize_attr :attrs_from_token
def registration_token_allowed?(attrs)
case attrs[:runner_type]
when :group_type
token_scope.allow_runner_registration_token?
when :project_type
token_scope.namespace.allow_runner_registration_token?
else
Gitlab::CurrentSettings.allow_runner_registration_token
end
end
def runner_registration_token_valid?(registration_token)
ActiveSupport::SecurityUtils.secure_compare(registration_token, Gitlab::CurrentSettings.runners_registration_token)
end
def runner_registrar_valid?(type)
Gitlab::CurrentSettings.valid_runner_registrars.include?(type)
end
def token_scope
case attrs_from_token[:runner_type]
when :project_type
attrs_from_token[:projects]&.first
when :group_type
attrs_from_token[:groups]&.first
# No scope for instance type
end
end
end
end
end
Ci::Runners::RegisterRunnerService.prepend_mod
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute', feature_category: :runner_fleet do
let(:registration_token) { 'abcdefg123456' }
let(:token) {}
let(:args) { {} }
let(:runner) { execute.payload[:runner] }
let(:allow_runner_registration_token) { true }
before do
stub_application_setting(runners_registration_token: registration_token)
stub_application_setting(valid_runner_registrars: ApplicationSetting::VALID_RUNNER_REGISTRAR_TYPES)
stub_application_setting(allow_runner_registration_token: allow_runner_registration_token)
end
subject(:execute) { described_class.new(token, args).execute }
shared_examples 'runner registration is disallowed' do
it 'returns error response with runner_registration_disallowed reason' do
expect(execute).to be_error
expect(execute.message).to eq 'runner registration disallowed'
expect(execute.reason).to eq :runner_registration_disallowed
end
end
context 'when no token is provided' do
let(:token) { '' }
it 'returns error response' do
expect(execute).to be_error
expect(execute.message).to eq 'invalid token supplied'
expect(execute.http_status).to eq :forbidden
end
end
context 'when invalid token is provided' do
let(:token) { 'invalid' }
it 'returns error response' do
expect(execute).to be_error
expect(execute.message).to eq 'invalid token supplied'
expect(execute.http_status).to eq :forbidden
end
end
context 'when valid token is provided' do
context 'when instance registration token is used' do
let(:token) { registration_token }
it 'creates runner with default values' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.persisted?).to be_truthy
expect(runner.run_untagged).to be true
expect(runner.active).to be true
expect(runner.token).not_to eq(registration_token)
expect(runner.token).not_to start_with(::Ci::Runner::CREATED_RUNNER_TOKEN_PREFIX)
expect(runner).to be_instance_type
end
context 'when registering instance runners is disallowed' do
let(:allow_runner_registration_token) { false }
it_behaves_like 'runner registration is disallowed'
end
context 'with non-default arguments' do
let(:args) do
{
description: 'some description',
active: false,
locked: true,
run_untagged: false,
tag_list: %w[tag1 tag2],
access_level: 'ref_protected',
maximum_timeout: 600,
name: 'some name',
version: 'some version',
revision: 'some revision',
platform: 'some platform',
architecture: 'some architecture',
ip_address: '10.0.0.1',
config: {
gpus: 'some gpu config'
}
}
end
it 'creates runner with specified values', :aggregate_failures do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.active).to eq args[:active]
expect(runner.locked).to eq args[:locked]
expect(runner.run_untagged).to eq args[:run_untagged]
expect(runner.tags).to contain_exactly(
an_object_having_attributes(name: 'tag1'),
an_object_having_attributes(name: 'tag2')
)
expect(runner.access_level).to eq args[:access_level]
expect(runner.maximum_timeout).to eq args[:maximum_timeout]
expect(runner.name).to eq args[:name]
expect(runner.version).to eq args[:version]
expect(runner.revision).to eq args[:revision]
expect(runner.platform).to eq args[:platform]
expect(runner.architecture).to eq args[:architecture]
expect(runner.ip_address).to eq args[:ip_address]
expect(Ci::Runner.tagged_with('tag1')).to include(runner)
expect(Ci::Runner.tagged_with('tag2')).to include(runner)
end
end
context 'with runner token expiration interval', :freeze_time do
before do
stub_application_setting(runner_token_expiration_interval: 5.days)
end
it 'creates runner with token expiration' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.token_expires_at).to eq(5.days.from_now)
end
end
end
context 'when project registration token is used' do
let_it_be(:project) { create(:project, :with_namespace_settings) }
let(:token) { project.runners_token }
let(:allow_group_runner_registration_token) { true }
before do
project.namespace.update!(allow_runner_registration_token: allow_group_runner_registration_token)
end
it 'creates project runner' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(project.runners.size).to eq(1)
expect(runner).to eq(project.runners.first)
expect(runner.token).not_to eq(registration_token)
expect(runner.token).not_to eq(project.runners_token)
expect(runner).to be_project_type
end
context 'with runner registration disabled at instance level' do
let(:allow_runner_registration_token) { false }
it_behaves_like 'runner registration is disallowed'
end
context 'with runner registration disabled at group level' do
let(:allow_group_runner_registration_token) { false }
it_behaves_like 'runner registration is disallowed'
end
context 'when it exceeds the application limits' do
before do
create(:ci_runner, runner_type: :project_type, projects: [project], contacted_at: 1.second.ago)
create(:plan_limits, :default_plan, ci_registered_project_runners: 1)
end
it 'does not create runner' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.persisted?).to be_falsey
expect(runner.errors.messages).to eq(
'runner_projects.base': ['Maximum number of ci registered project runners (1) exceeded']
)
expect(project.runners.reload.size).to eq(1)
end
end
context 'when abandoned runners cause application limits to not be exceeded' do
before do
create(:ci_runner, runner_type: :project_type, projects: [project], created_at: 14.months.ago, contacted_at: 13.months.ago)
create(:plan_limits, :default_plan, ci_registered_project_runners: 1)
end
it 'creates runner' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(project.runners.reload.size).to eq(2)
expect(project.runners.recent.size).to eq(1)
end
end
context 'when valid runner registrars do not include project' do
before do
stub_application_setting(valid_runner_registrars: ['group'])
end
it 'returns 403 error' do
expect(execute).to be_error
expect(execute.http_status).to eq :forbidden
end
end
end
context 'when group registration token is used' do
let_it_be_with_refind(:group) { create(:group) }
let(:token) { group.runners_token }
let(:allow_group_runner_registration_token) { true }
before do
group.update!(allow_runner_registration_token: allow_group_runner_registration_token)
end
it 'creates a group runner' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(group.runners.reload.size).to eq(1)
expect(runner.token).not_to eq(registration_token)
expect(runner.token).not_to eq(group.runners_token)
expect(runner).to be_group_type
end
context 'with runner registration disabled at instance level' do
let(:allow_runner_registration_token) { false }
it_behaves_like 'runner registration is disallowed'
end
context 'with runner registration disabled at group level' do
let(:allow_group_runner_registration_token) { false }
it_behaves_like 'runner registration is disallowed'
end
context 'when it exceeds the application limits' do
before do
create(:ci_runner, runner_type: :group_type, groups: [group], contacted_at: nil, created_at: 1.month.ago)
create(:plan_limits, :default_plan, ci_registered_group_runners: 1)
end
it 'does not create runner' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.persisted?).to be_falsey
expect(runner.errors.messages).to eq(
'runner_namespaces.base': ['Maximum number of ci registered group runners (1) exceeded']
)
expect(group.runners.reload.size).to eq(1)
end
end
context 'when abandoned runners cause application limits to not be exceeded' do
before do
create(:ci_runner, runner_type: :group_type, groups: [group], created_at: 4.months.ago, contacted_at: 3.months.ago)
create(:ci_runner, runner_type: :group_type, groups: [group], contacted_at: nil, created_at: 4.months.ago)
create(:plan_limits, :default_plan, ci_registered_group_runners: 1)
end
it 'creates runner' do
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(group.runners.reload.size).to eq(3)
expect(group.runners.recent.size).to eq(1)
end
end
context 'when valid runner registrars do not include group' do
before do
stub_application_setting(valid_runner_registrars: ['project'])
end
it 'returns error response' do
is_expected.to be_error
end
end
end
context 'when tags are provided' do
let(:token) { registration_token }
let(:args) do
{ tag_list: %w[tag1 tag2] }
end
it 'creates runner with tags' do
expect(runner).to be_persisted
expect(runner.tags).to contain_exactly(
an_object_having_attributes(name: 'tag1'),
an_object_having_attributes(name: 'tag2')
)
end
it 'creates tags in bulk' do
expect(Gitlab::Ci::Tags::BulkInsert).to receive(:bulk_insert_tags!).and_call_original
expect(runner).to be_persisted
end
context 'and tag list exceeds limit' do
let(:args) do
{ tag_list: (1..Ci::Runner::TAG_LIST_MAX_LENGTH + 1).map { |i| "tag#{i}" } }
end
it 'does not create any tags' do
expect(Gitlab::Ci::Tags::BulkInsert).not_to receive(:bulk_insert_tags!)
expect(runner).not_to be_persisted
expect(runner.tags).to be_empty
end
end
end
end
end
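# A minimal usage sketch, assuming `registration_token` is a valid instance,
# group, or project runners token:
#
#   response = ::Ci::Runners::RegisterRunnerService.new(
#     registration_token, { description: 'my runner', tag_list: %w[docker] }
#   ).execute
#   response.payload[:runner] # => the newly registered Ci::Runner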
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module Runners
class ProcessRunnerVersionUpdateService
def initialize(version)
@version = version
end
def execute
return ServiceResponse.error(message: 'version update disabled') unless enabled?
return ServiceResponse.error(message: 'version not present') unless @version
_, status = upgrade_check_service.check_runner_upgrade_suggestion(@version)
return ServiceResponse.error(message: 'upgrade version check failed') if status == :error
Ci::RunnerVersion.upsert({ version: @version, status: status })
ServiceResponse.success(payload: { upgrade_status: status.to_s })
end
private
def upgrade_check_service
@runner_upgrade_check ||= Gitlab::Ci::RunnerUpgradeCheck.new(::Gitlab::VERSION)
end
def enabled?
Gitlab::Ci::RunnerReleases.instance.enabled?
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::Runners::ProcessRunnerVersionUpdateService, feature_category: :runner_fleet do
subject(:service) { described_class.new(version) }
let(:version) { '1.0.0' }
let(:available_runner_releases) { %w[1.0.0 1.0.1] }
describe '#execute' do
subject(:execute) { service.execute }
context 'with upgrade check returning error' do
let(:service_double) { instance_double(Gitlab::Ci::RunnerUpgradeCheck) }
before do
allow(service_double).to receive(:check_runner_upgrade_suggestion).with(version)
.and_return([version, :error])
allow(service).to receive(:upgrade_check_service).and_return(service_double)
end
it 'does not update ci_runner_versions records', :aggregate_failures do
expect do
expect(execute).to be_error
expect(execute.message).to eq 'upgrade version check failed'
end.not_to change(Ci::RunnerVersion, :count).from(0)
expect(service_double).to have_received(:check_runner_upgrade_suggestion).with(version).once
end
end
context 'when fetching runner releases is disabled' do
before do
stub_application_setting(update_runner_versions_enabled: false)
end
it 'does not update ci_runner_versions records', :aggregate_failures do
expect do
expect(execute).to be_error
expect(execute.message).to eq 'version update disabled'
end.not_to change(Ci::RunnerVersion, :count).from(0)
end
end
context 'with successful result from upgrade check' do
before do
url = ::Gitlab::CurrentSettings.current_application_settings.public_runner_releases_url
WebMock.stub_request(:get, url).to_return(
body: available_runner_releases.map { |v| { name: v } }.to_json,
status: 200,
headers: { 'Content-Type' => 'application/json' }
)
end
context 'with no existing ci_runner_version record' do
it 'creates ci_runner_versions record', :aggregate_failures do
expect do
expect(execute).to be_success
expect(execute.http_status).to eq :ok
expect(execute.payload).to eq({ upgrade_status: 'recommended' })
end.to change(Ci::RunnerVersion, :all).to contain_exactly(
an_object_having_attributes(version: version, status: 'recommended')
)
end
end
context 'with existing ci_runner_version record' do
let!(:runner_version) { create(:ci_runner_version, version: '1.0.0', status: :unavailable) }
it 'updates ci_runner_versions record', :aggregate_failures do
expect do
expect(execute).to be_success
expect(execute.http_status).to eq :ok
expect(execute.payload).to eq({ upgrade_status: 'recommended' })
end.to change { runner_version.reload.status }.from('unavailable').to('recommended')
end
end
context 'with up-to-date ci_runner_version record' do
let!(:runner_version) { create(:ci_runner_version, version: '1.0.0', status: :recommended) }
it 'does not update ci_runner_versions record', :aggregate_failures do
expect do
expect(execute).to be_success
expect(execute.http_status).to eq :ok
expect(execute.payload).to eq({ upgrade_status: 'recommended' })
end.not_to change { runner_version.reload.status }
end
end
end
end
end
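# A minimal usage sketch, assuming runner version fetching is enabled and the
# public runner releases URL is reachable:
#
#   response = Ci::Runners::ProcessRunnerVersionUpdateService.new('1.0.0').execute
#   response.payload # => { upgrade_status: 'recommended' }; the matching
#   # ci_runner_versions row is upserted with that status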
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class DeleteProjectArtifactsService < BaseProjectService
def execute
ExpireProjectBuildArtifactsWorker.perform_async(project.id)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::DeleteProjectArtifactsService, feature_category: :build_artifacts do
let_it_be(:project) { create(:project) }
subject { described_class.new(project: project) }
describe '#execute' do
it 'enqueues a Ci::ExpireProjectBuildArtifactsWorker' do
expect(Ci::JobArtifacts::ExpireProjectBuildArtifactsWorker).to receive(:perform_async).with(project.id).and_call_original
subject.execute
end
end
end
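# A minimal usage sketch; the service only enqueues the expiry worker:
#
#   Ci::JobArtifacts::DeleteProjectArtifactsService.new(project: project).execute
#   # => enqueues ExpireProjectBuildArtifactsWorker.perform_async(project.id)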
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class TrackArtifactReportService
include Gitlab::Utils::UsageData
REPORT_TRACKED = %i[test coverage].freeze
def execute(pipeline)
REPORT_TRACKED.each do |report|
if pipeline.complete_and_has_reports?(Ci::JobArtifact.of_report_type(report))
track_usage_event(event_name(report), pipeline.user_id)
end
end
end
def event_name(report)
"i_testing_#{report}_report_uploaded"
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::TrackArtifactReportService, feature_category: :build_artifacts do
describe '#execute', :clean_gitlab_redis_shared_state do
let_it_be(:group) { create(:group, :private) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:user1) { create(:user) }
let_it_be(:user2) { create(:user) }
let(:test_event_name_1) { 'i_testing_test_report_uploaded' }
let(:test_event_name_2) { 'i_testing_coverage_report_uploaded' }
let(:counter) { Gitlab::UsageDataCounters::HLLRedisCounter }
let(:start_time) { 1.week.ago }
let(:end_time) { 1.week.from_now }
subject(:track_artifact_report) { described_class.new.execute(pipeline) }
context 'when pipeline has test reports' do
let_it_be(:pipeline) { create(:ci_pipeline, project: project, user: user1) }
before do
2.times do
pipeline.builds << build(:ci_build, :test_reports, pipeline: pipeline, project: pipeline.project)
end
end
it 'tracks the test event using HLLRedisCounter' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_1, values: user1.id)
.and_call_original
expect { track_artifact_report }.to change {
counter.unique_events(event_names: test_event_name_1, start_date: start_time, end_date: end_time)
}.by 1
end
end
context 'when pipeline does not have test reports' do
let_it_be(:pipeline) { create(:ci_empty_pipeline) }
      it 'does not track the test event' do
        expect(Gitlab::UsageDataCounters::HLLRedisCounter)
          .not_to receive(:track_event)
          .with(test_event_name_1, values: anything)
        track_artifact_report
      end
      it 'does not track the coverage test event' do
        expect(Gitlab::UsageDataCounters::HLLRedisCounter)
          .not_to receive(:track_event)
          .with(test_event_name_2, values: anything)
        track_artifact_report
      end
end
context 'when a single user started multiple pipelines with test reports' do
let_it_be(:pipeline1) { create(:ci_pipeline, :with_test_reports, project: project, user: user1) }
let_it_be(:pipeline2) { create(:ci_pipeline, :with_test_reports, project: project, user: user1) }
it 'tracks all pipelines using HLLRedisCounter by one user_id for the test event' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_1, values: user1.id)
.and_call_original
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_1, values: user1.id)
.and_call_original
expect do
described_class.new.execute(pipeline1)
described_class.new.execute(pipeline2)
end.to change {
counter.unique_events(event_names: test_event_name_1, start_date: start_time, end_date: end_time)
}.by 1
end
end
context 'when multiple users started multiple pipelines with test reports' do
let_it_be(:pipeline1) { create(:ci_pipeline, :with_test_reports, project: project, user: user1) }
let_it_be(:pipeline2) { create(:ci_pipeline, :with_test_reports, project: project, user: user2) }
it 'tracks all pipelines using HLLRedisCounter by multiple users for test reports' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_1, values: user1.id)
.and_call_original
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_1, values: user2.id)
.and_call_original
expect do
described_class.new.execute(pipeline1)
described_class.new.execute(pipeline2)
end.to change {
counter.unique_events(event_names: test_event_name_1, start_date: start_time, end_date: end_time)
}.by 2
end
end
context 'when pipeline has coverage test reports' do
let_it_be(:pipeline) { create(:ci_pipeline, project: project, user: user1) }
before do
2.times do
pipeline.builds << build(:ci_build, :coverage_reports, pipeline: pipeline, project: pipeline.project)
end
end
it 'tracks the coverage test event using HLLRedisCounter' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_2, values: user1.id)
.and_call_original
expect { track_artifact_report }.to change {
counter.unique_events(event_names: test_event_name_2, start_date: start_time, end_date: end_time)
}.by 1
end
end
context 'when a single user started multiple pipelines with coverage reports' do
let_it_be(:pipeline1) { create(:ci_pipeline, :with_coverage_reports, project: project, user: user1) }
let_it_be(:pipeline2) { create(:ci_pipeline, :with_coverage_reports, project: project, user: user1) }
it 'tracks all pipelines using HLLRedisCounter by one user_id for the coverage test event' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_2, values: user1.id)
.twice
.and_call_original
expect do
described_class.new.execute(pipeline1)
described_class.new.execute(pipeline2)
end.to change {
counter.unique_events(event_names: test_event_name_2, start_date: start_time, end_date: end_time)
}.by 1
end
end
context 'when multiple users started multiple pipelines with coverage test reports' do
let_it_be(:pipeline1) { create(:ci_pipeline, :with_coverage_reports, project: project, user: user1) }
let_it_be(:pipeline2) { create(:ci_pipeline, :with_coverage_reports, project: project, user: user2) }
it 'tracks all pipelines using HLLRedisCounter by multiple users for coverage test reports' do
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_2, values: user1.id)
.and_call_original
expect(Gitlab::UsageDataCounters::HLLRedisCounter)
.to receive(:track_event)
.with(test_event_name_2, values: user2.id)
.and_call_original
expect do
described_class.new.execute(pipeline1)
described_class.new.execute(pipeline2)
end.to change {
counter.unique_events(event_names: test_event_name_2, start_date: start_time, end_date: end_time)
}.by 2
end
end
end
end
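# A minimal usage sketch; for a completed pipeline with test or coverage reports,
# one HLL event is tracked per report type against the pipeline's user:
#
#   Ci::JobArtifacts::TrackArtifactReportService.new.execute(pipeline)
#   # => tracks 'i_testing_test_report_uploaded' and/or
#   #    'i_testing_coverage_report_uploaded' for pipeline.user_id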
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class DestroyBatchService
include BaseServiceUtility
include ::Gitlab::Utils::StrongMemoize
# Danger: Private - Should only be called in Ci Services that pass a batch of job artifacts
# Not for use outside of the Ci:: namespace
# Adds the passed batch of job artifacts to the `ci_deleted_objects` table
      # for asynchronous destruction of the objects in Object Storage via the `Ci::DeleteObjectsService`
# and then deletes the batch of related `ci_job_artifacts` records.
# Params:
# +job_artifacts+:: A relation of job artifacts to destroy (fewer than MAX_JOB_ARTIFACT_BATCH_SIZE)
# +pick_up_at+:: When to pick up for deletion of files
# Returns:
# +Hash+:: A hash with status and destroyed_artifacts_count keys
def initialize(job_artifacts, pick_up_at: nil, skip_projects_on_refresh: false)
@job_artifacts = job_artifacts.with_destroy_preloads.to_a
@pick_up_at = pick_up_at
@skip_projects_on_refresh = skip_projects_on_refresh
@destroyed_ids = []
end
# rubocop: disable CodeReuse/ActiveRecord
def execute(update_stats: true)
if @skip_projects_on_refresh
exclude_artifacts_undergoing_stats_refresh
else
track_artifacts_undergoing_stats_refresh
end
if @job_artifacts.empty?
return success(destroyed_ids: @destroyed_ids, destroyed_artifacts_count: 0, statistics_updates: {})
end
destroy_related_records(@job_artifacts)
destroy_around_hook(@job_artifacts) do
@destroyed_ids = @job_artifacts.map(&:id)
Ci::DeletedObject.transaction do
Ci::DeletedObject.bulk_import(@job_artifacts, @pick_up_at)
Ci::JobArtifact.id_in(@destroyed_ids).delete_all
end
end
after_batch_destroy_hook(@job_artifacts)
update_project_statistics! if update_stats
increment_monitoring_statistics(artifacts_count, artifacts_bytes)
Gitlab::Ci::Artifacts::Logger.log_deleted(@job_artifacts, 'Ci::JobArtifacts::DestroyBatchService#execute')
success(
destroyed_ids: @destroyed_ids,
destroyed_artifacts_count: artifacts_count,
statistics_updates: statistics_updates_per_project
)
end
# rubocop: enable CodeReuse/ActiveRecord
private
      # Overridden in EE
# :nocov:
def destroy_around_hook(artifacts)
yield
end
# :nocov:
      # Overridden in EE
def destroy_related_records(artifacts); end
      # Overridden in EE
def after_batch_destroy_hook(artifacts); end
# using ! here since this can't be called inside a transaction
def update_project_statistics!
statistics_updates_per_project.each do |project, increments|
ProjectStatistics.bulk_increment_statistic(project, Ci::JobArtifact.project_statistics_name, increments)
end
end
def statistics_updates_per_project
strong_memoize(:statistics_updates_per_project) do
result = Hash.new { |updates, project| updates[project] = [] }
@job_artifacts.each_with_object(result) do |job_artifact, result|
increment = Gitlab::Counters::Increment.new(amount: -job_artifact.size.to_i, ref: job_artifact.id)
result[job_artifact.project] << increment
end
end
end
def increment_monitoring_statistics(size, bytes)
metrics.increment_destroyed_artifacts_count(size)
metrics.increment_destroyed_artifacts_bytes(bytes)
end
def metrics
@metrics ||= ::Gitlab::Ci::Artifacts::Metrics.new
end
def artifacts_count
strong_memoize(:artifacts_count) do
@job_artifacts.count
end
end
def artifacts_bytes
strong_memoize(:artifacts_bytes) do
@job_artifacts.sum { |artifact| artifact.try(:size) || 0 }
end
end
def track_artifacts_undergoing_stats_refresh
project_ids = @job_artifacts.find_all do |artifact|
artifact.project.refreshing_build_artifacts_size?
end.map(&:project_id).uniq
project_ids.each do |project_id|
Gitlab::ProjectStatsRefreshConflictsLogger.warn_artifact_deletion_during_stats_refresh(
method: 'Ci::JobArtifacts::DestroyBatchService#execute',
project_id: project_id
)
end
end
def exclude_artifacts_undergoing_stats_refresh
project_ids = Set.new
@job_artifacts.reject! do |artifact|
next unless artifact.project.refreshing_build_artifacts_size?
project_ids << artifact.project_id
end
if project_ids.any?
Gitlab::ProjectStatsRefreshConflictsLogger.warn_skipped_artifact_deletion_during_stats_refresh(
method: 'Ci::JobArtifacts::DestroyBatchService#execute',
project_ids: project_ids
)
end
end
end
end
end
Ci::JobArtifacts::DestroyBatchService.prepend_mod_with('Ci::JobArtifacts::DestroyBatchService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::DestroyBatchService, feature_category: :build_artifacts do
let(:artifacts) { Ci::JobArtifact.where(id: [artifact_with_file.id, artifact_without_file.id]) }
let(:skip_projects_on_refresh) { false }
let(:service) do
described_class.new(
artifacts,
pick_up_at: Time.current,
skip_projects_on_refresh: skip_projects_on_refresh
)
end
let_it_be(:artifact_with_file, refind: true) do
create(:ci_job_artifact, :zip)
end
let_it_be(:artifact_without_file, refind: true) do
create(:ci_job_artifact)
end
let_it_be(:undeleted_artifact, refind: true) do
create(:ci_job_artifact)
end
describe '#execute' do
subject(:execute) { service.execute }
it 'creates a deleted object for artifact with attached file' do
expect { subject }.to change { Ci::DeletedObject.count }.by(1)
end
it 'does not remove the attached file' do
expect { execute }.not_to change { artifact_with_file.file.exists? }
end
it 'deletes the artifact records and logs them' do
expect(Gitlab::Ci::Artifacts::Logger)
.to receive(:log_deleted)
.with(
match_array([artifact_with_file, artifact_without_file]),
'Ci::JobArtifacts::DestroyBatchService#execute'
)
expect { subject }.to change { Ci::JobArtifact.count }.by(-2)
end
it 'reports metrics for destroyed artifacts' do
expect_next_instance_of(Gitlab::Ci::Artifacts::Metrics) do |metrics|
expect(metrics).to receive(:increment_destroyed_artifacts_count).with(2).and_call_original
expect(metrics).to receive(:increment_destroyed_artifacts_bytes).with(107464).and_call_original
end
execute
end
context 'when artifact belongs to a project that is undergoing stats refresh' do
let!(:artifact_under_refresh_1) do
create(:ci_job_artifact, :zip)
end
let!(:artifact_under_refresh_2) do
create(:ci_job_artifact, :zip)
end
let!(:artifact_under_refresh_3) do
create(:ci_job_artifact, :zip, project: artifact_under_refresh_2.project)
end
let(:artifacts) do
Ci::JobArtifact.where(id: [artifact_with_file.id, artifact_under_refresh_1.id, artifact_under_refresh_2.id,
artifact_under_refresh_3.id])
end
before do
create(:project_build_artifacts_size_refresh, :created, project: artifact_with_file.project)
create(:project_build_artifacts_size_refresh, :pending, project: artifact_under_refresh_1.project)
create(:project_build_artifacts_size_refresh, :running, project: artifact_under_refresh_2.project)
end
shared_examples 'avoiding N+1 queries' do
let!(:control_artifact_on_refresh) do
create(:ci_job_artifact, :zip)
end
let!(:control_artifact_non_refresh) do
create(:ci_job_artifact, :zip)
end
let!(:other_artifact_on_refresh) do
create(:ci_job_artifact, :zip)
end
let!(:other_artifact_on_refresh_2) do
create(:ci_job_artifact, :zip)
end
let!(:other_artifact_non_refresh) do
create(:ci_job_artifact, :zip)
end
let!(:control_artifacts) do
Ci::JobArtifact.where(
id: [
control_artifact_on_refresh.id,
control_artifact_non_refresh.id
]
)
end
let!(:artifacts) do
Ci::JobArtifact.where(
id: [
other_artifact_on_refresh.id,
other_artifact_on_refresh_2.id,
other_artifact_non_refresh.id
]
)
end
let(:control_service) do
described_class.new(
control_artifacts,
pick_up_at: Time.current,
skip_projects_on_refresh: skip_projects_on_refresh
)
end
before do
create(:project_build_artifacts_size_refresh, :pending, project: control_artifact_on_refresh.project)
create(:project_build_artifacts_size_refresh, :pending, project: other_artifact_on_refresh.project)
create(:project_build_artifacts_size_refresh, :pending, project: other_artifact_on_refresh_2.project)
end
it 'does not make multiple queries when fetching multiple project refresh records' do
control = ActiveRecord::QueryRecorder.new { control_service.execute }
expect { subject }.not_to exceed_query_limit(control)
end
end
context 'and skip_projects_on_refresh is set to false (default)' do
it 'logs the projects undergoing refresh and continues with the delete', :aggregate_failures do
expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_artifact_deletion_during_stats_refresh).with(
method: 'Ci::JobArtifacts::DestroyBatchService#execute',
project_id: artifact_under_refresh_1.project.id
).once
expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_artifact_deletion_during_stats_refresh).with(
method: 'Ci::JobArtifacts::DestroyBatchService#execute',
project_id: artifact_under_refresh_2.project.id
).once
expect { subject }.to change { Ci::JobArtifact.count }.by(-4)
end
it_behaves_like 'avoiding N+1 queries'
end
context 'and skip_projects_on_refresh is set to true' do
let(:skip_projects_on_refresh) { true }
it 'logs the projects undergoing refresh and excludes the artifacts from deletion', :aggregate_failures do
expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_skipped_artifact_deletion_during_stats_refresh).with(
method: 'Ci::JobArtifacts::DestroyBatchService#execute',
project_ids: match_array([artifact_under_refresh_1.project.id, artifact_under_refresh_2.project.id])
)
expect { subject }.to change { Ci::JobArtifact.count }.by(-1)
expect(Ci::JobArtifact.where(id: artifact_under_refresh_1.id)).to exist
expect(Ci::JobArtifact.where(id: artifact_under_refresh_2.id)).to exist
expect(Ci::JobArtifact.where(id: artifact_under_refresh_3.id)).to exist
end
it_behaves_like 'avoiding N+1 queries'
end
end
context 'when artifact belongs to a project not undergoing refresh' do
context 'and skip_projects_on_refresh is set to false (default)' do
it 'does not log any warnings', :aggregate_failures do
expect(Gitlab::ProjectStatsRefreshConflictsLogger).not_to receive(:warn_artifact_deletion_during_stats_refresh)
expect { subject }.to change { Ci::JobArtifact.count }.by(-2)
end
end
context 'and skip_projects_on_refresh is set to true' do
let(:skip_projects_on_refresh) { true }
it 'does not log any warnings', :aggregate_failures do
expect(Gitlab::ProjectStatsRefreshConflictsLogger).not_to receive(:warn_skipped_artifact_deletion_during_stats_refresh)
expect { subject }.to change { Ci::JobArtifact.count }.by(-2)
end
end
end
context 'ProjectStatistics', :sidekiq_inline do
let_it_be(:project_1) { create(:project) }
let_it_be(:project_2) { create(:project) }
let(:artifact_with_file) { create(:ci_job_artifact, :zip, project: project_1) }
let(:artifact_with_file_2) { create(:ci_job_artifact, :zip, project: project_1) }
let(:artifact_without_file) { create(:ci_job_artifact, project: project_2) }
let!(:artifacts) { Ci::JobArtifact.where(id: [artifact_with_file.id, artifact_without_file.id, artifact_with_file_2.id]) }
it 'updates project statistics by the relevant amount' do
expected_amount = -(artifact_with_file.size + artifact_with_file_2.size)
expect { execute }
.to change { project_1.statistics.reload.build_artifacts_size }.by(expected_amount)
.and change { project_2.statistics.reload.build_artifacts_size }.by(0)
end
it 'increments project statistics with artifact size as amount and job artifact id as ref' do
project_1_increments = [
have_attributes(amount: -artifact_with_file.size, ref: artifact_with_file.id),
have_attributes(amount: -artifact_with_file_2.file.size, ref: artifact_with_file_2.id)
]
project_2_increments = [have_attributes(amount: 0, ref: artifact_without_file.id)]
expect(ProjectStatistics).to receive(:bulk_increment_statistic).with(project_1, :build_artifacts_size, match_array(project_1_increments))
expect(ProjectStatistics).to receive(:bulk_increment_statistic).with(project_2, :build_artifacts_size, match_array(project_2_increments))
execute
end
context 'with update_stats: false' do
subject(:execute) { service.execute(update_stats: false) }
it 'does not update project statistics' do
expect { execute }.not_to change { [project_1.statistics.reload.build_artifacts_size, project_2.statistics.reload.build_artifacts_size] }
end
it 'returns statistic updates per project' do
project_1_updates = [
have_attributes(amount: -artifact_with_file.size, ref: artifact_with_file.id),
have_attributes(amount: -artifact_with_file_2.file.size, ref: artifact_with_file_2.id)
]
project_2_updates = [have_attributes(amount: 0, ref: artifact_without_file.id)]
expected_updates = {
statistics_updates: {
project_1 => match_array(project_1_updates),
project_2 => project_2_updates
}
}
expect(execute).to match(a_hash_including(expected_updates))
end
end
end
context 'when failed to destroy artifact' do
context 'when the import fails' do
before do
expect(Ci::DeletedObject)
.to receive(:bulk_import)
.once
.and_raise(ActiveRecord::RecordNotDestroyed)
end
        it 'raises an exception and stops destroying' do
expect { execute }.to raise_error(ActiveRecord::RecordNotDestroyed)
.and not_change { Ci::JobArtifact.count }
end
end
end
context 'when there are no artifacts' do
let(:artifacts) { Ci::JobArtifact.none }
it 'does not raise error' do
expect { execute }.not_to raise_error
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(destroyed_artifacts_count: 0, destroyed_ids: [], statistics_updates: {}, status: :success)
end
end
end
end
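# A minimal usage sketch, assuming `artifacts` is a bounded Ci::JobArtifact
# relation (the service is intended for callers inside the Ci:: namespace):
#
#   result = Ci::JobArtifacts::DestroyBatchService.new(artifacts).execute
#   result[:destroyed_artifacts_count] # => rows queued in ci_deleted_objects
#                                      #    and removed from ci_job_artifacts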
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
# This class is used by Ci::JobArtifact's FastDestroyAll implementation.
# Ci::JobArtifact.begin_fast_destroy instantiates this service and calls #destroy_records.
    # This sets the @statistics_updates instance variable.
# The same instance is passed to Ci::JobArtifact.finalize_fast_destroy, which then calls
# #update_statistics, using @statistics_updates set by #destroy_records.
class DestroyAssociationsService
BATCH_SIZE = 100
def initialize(job_artifacts_relation)
@job_artifacts_relation = job_artifacts_relation
@statistics_updates = {}
end
def destroy_records
@job_artifacts_relation.each_batch(of: BATCH_SIZE) do |relation|
service = Ci::JobArtifacts::DestroyBatchService.new(relation, pick_up_at: Time.current)
result = service.execute(update_stats: false)
@statistics_updates.merge!(result[:statistics_updates]) do |_project, existing_updates, new_updates|
existing_updates.concat(new_updates)
end
end
end
def update_statistics
@statistics_updates.each do |project, increments|
ProjectStatistics.bulk_increment_statistic(project, Ci::JobArtifact.project_statistics_name, increments)
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::DestroyAssociationsService, feature_category: :build_artifacts do
let_it_be(:project_1) { create(:project) }
let_it_be(:project_2) { create(:project) }
let_it_be(:artifact_1, refind: true) { create(:ci_job_artifact, :zip, project: project_1) }
let_it_be(:artifact_2, refind: true) { create(:ci_job_artifact, :junit, project: project_2) }
let_it_be(:artifact_3, refind: true) { create(:ci_job_artifact, :terraform, project: project_1) }
let_it_be(:artifact_4, refind: true) { create(:ci_job_artifact, :trace, project: project_2) }
let_it_be(:artifact_5, refind: true) { create(:ci_job_artifact, :metadata, project: project_2) }
let_it_be(:locked_artifact, refind: true) { create(:ci_job_artifact, :zip, :locked, project: project_1) }
let(:artifact_ids_to_be_removed) { [artifact_1.id, artifact_2.id, artifact_3.id, artifact_4.id, artifact_5.id] }
let(:artifacts) { Ci::JobArtifact.where(id: artifact_ids_to_be_removed) }
let(:service) { described_class.new(artifacts) }
describe '#destroy_records' do
it 'removes all types of artifacts without updating statistics' do
expect_next_instance_of(Ci::JobArtifacts::DestroyBatchService) do |service|
expect(service).to receive(:execute).with(update_stats: false).and_call_original
end
expect { service.destroy_records }.to change { Ci::JobArtifact.count }.by(-artifact_ids_to_be_removed.count)
end
context 'with a locked artifact' do
let(:artifact_ids_to_be_removed) { [artifact_1.id, locked_artifact.id] }
it 'removes all artifacts' do
expect { service.destroy_records }.to change { Ci::JobArtifact.count }.by(-artifact_ids_to_be_removed.count)
end
end
context 'when there are no artifacts' do
let(:artifacts) { Ci::JobArtifact.none }
it 'does not raise error' do
expect { service.destroy_records }.not_to raise_error
end
end
end
describe '#update_statistics' do
before do
stub_const("#{described_class}::BATCH_SIZE", 2)
service.destroy_records
end
it 'updates project statistics' do
project1_increments = [
have_attributes(amount: -artifact_1.size, ref: artifact_1.id),
have_attributes(amount: -artifact_3.size, ref: artifact_3.id)
]
project2_increments = [
have_attributes(amount: -artifact_2.size, ref: artifact_2.id),
have_attributes(amount: -artifact_4.size, ref: artifact_4.id),
have_attributes(amount: -artifact_5.size, ref: artifact_5.id)
]
expect(ProjectStatistics).to receive(:bulk_increment_statistic).once
.with(project_1, :build_artifacts_size, match_array(project1_increments))
expect(ProjectStatistics).to receive(:bulk_increment_statistic).once
.with(project_2, :build_artifacts_size, match_array(project2_increments))
service.update_statistics
end
context 'when there are no artifacts' do
let(:artifacts) { Ci::JobArtifact.none }
it 'does not raise error' do
expect { service.update_statistics }.not_to raise_error
end
end
end
end
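# A minimal sketch of the two-phase FastDestroyAll flow this service supports:
#
#   service = Ci::JobArtifacts::DestroyAssociationsService.new(artifacts)
#   service.destroy_records    # deletes artifacts in batches, accumulating increments
#   service.update_statistics  # applies the accumulated project statistics updates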
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class DestroyAllExpiredService
include ::Gitlab::ExclusiveLeaseHelpers
include ::Gitlab::LoopHelpers
BATCH_SIZE = 100
LOOP_LIMIT = 500
LOOP_TIMEOUT = 5.minutes
LOCK_TIMEOUT = 6.minutes
EXCLUSIVE_LOCK_KEY = 'expired_job_artifacts:destroy:lock'
def initialize
@removed_artifacts_count = 0
@start_at = Time.current
end
##
# Destroy expired job artifacts on GitLab instance
#
      # This destroy process cannot run for more than 6 minutes. This prevents
      # multiple `ExpireBuildArtifactsWorker` CRON jobs, which are scheduled every
      # 7 minutes, from running concurrently.
def execute
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
destroy_unlocked_job_artifacts
end
@removed_artifacts_count
end
private
def destroy_unlocked_job_artifacts
loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
artifacts = Ci::JobArtifact.expired_before(@start_at).non_trace.artifact_unlocked.limit(BATCH_SIZE)
service_response = destroy_batch(artifacts)
@removed_artifacts_count += service_response[:destroyed_artifacts_count]
end
end
def destroy_batch(artifacts)
Ci::JobArtifacts::DestroyBatchService.new(artifacts, skip_projects_on_refresh: true).execute
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_shared_state,
feature_category: :build_artifacts do
include ExclusiveLeaseHelpers
let(:service) { described_class.new }
describe '.execute' do
subject { service.execute }
let_it_be(:locked_pipeline) { create(:ci_pipeline, :artifacts_locked) }
let_it_be(:pipeline) { create(:ci_pipeline, :unlocked) }
let_it_be(:locked_job) { create(:ci_build, :success, pipeline: locked_pipeline) }
let_it_be(:job) { create(:ci_build, :success, pipeline: pipeline) }
context 'when artifact is expired' do
let!(:artifact) { create(:ci_job_artifact, :expired, job: job, locked: job.pipeline.locked) }
context 'with preloaded relationships' do
let(:second_artifact) { create(:ci_job_artifact, :expired, :junit, job: job) }
let(:more_artifacts) do
[
create(:ci_job_artifact, :expired, :sast, job: job),
create(:ci_job_artifact, :expired, :metadata, job: job),
create(:ci_job_artifact, :expired, :codequality, job: job),
create(:ci_job_artifact, :expired, :accessibility, job: job)
]
end
before do
stub_const("#{described_class}::LOOP_LIMIT", 1)
# This artifact-with-file is created before the control execution to ensure
# that the DeletedObject operations are accounted for in the query count.
second_artifact
end
it 'performs a consistent number of queries' do
control = ActiveRecord::QueryRecorder.new { service.execute }
more_artifacts
expect { subject }.not_to exceed_query_limit(control.count)
end
end
context 'when artifact is not locked' do
it 'deletes job artifact record' do
expect { subject }.to change { Ci::JobArtifact.count }.by(-1)
end
context 'when the artifact does not have a file attached to it' do
it 'does not create deleted objects' do
expect(artifact.exists?).to be_falsy # sanity check
expect { subject }.not_to change { Ci::DeletedObject.count }
end
end
context 'when the artifact has a file attached to it' do
let!(:artifact) { create(:ci_job_artifact, :expired, :zip, job: job, locked: job.pipeline.locked) }
it 'creates a deleted object' do
expect { subject }.to change { Ci::DeletedObject.count }.by(1)
end
it 'resets project statistics', :sidekiq_inline do
expect { subject }
.to change { artifact.project.statistics.reload.build_artifacts_size }.by(-artifact.file.size)
end
it 'does not remove the files' do
expect { subject }.not_to change { artifact.file.exists? }
end
end
context 'when the project in which the artifact belongs to is undergoing stats refresh' do
before do
create(:project_build_artifacts_size_refresh, :pending, project: artifact.project)
end
it 'does not destroy job artifact' do
expect { subject }.not_to change { Ci::JobArtifact.count }
end
end
end
context 'when artifact is locked' do
let!(:artifact) { create(:ci_job_artifact, :expired, job: locked_job, locked: locked_job.pipeline.locked) }
it 'does not destroy job artifact' do
expect { subject }.not_to change { Ci::JobArtifact.count }
end
end
end
context 'when artifact is not expired' do
let!(:artifact) { create(:ci_job_artifact, job: job, locked: job.pipeline.locked) }
it 'does not destroy expired job artifacts' do
expect { subject }.not_to change { Ci::JobArtifact.count }
end
end
context 'when artifact is permanent' do
let!(:artifact) { create(:ci_job_artifact, expire_at: nil, job: job, locked: job.pipeline.locked) }
it 'does not destroy expired job artifacts' do
expect { subject }.not_to change { Ci::JobArtifact.count }
end
end
context 'when failed to destroy artifact' do
let!(:artifact) { create(:ci_job_artifact, :expired, job: job, locked: job.pipeline.locked) }
before do
stub_const("#{described_class}::LOOP_LIMIT", 10)
end
context 'when the import fails' do
before do
expect(Ci::DeletedObject)
.to receive(:bulk_import)
.once
.and_raise(ActiveRecord::RecordNotDestroyed)
end
        it 'raises an exception and stops destroying' do
expect { subject }.to raise_error(ActiveRecord::RecordNotDestroyed)
.and not_change { Ci::JobArtifact.count }.from(1)
end
end
context 'when the delete fails' do
before do
expect(Ci::JobArtifact)
.to receive(:id_in)
.once
.and_raise(ActiveRecord::RecordNotDestroyed)
end
        it 'raises an exception and rolls back the insert' do
expect { subject }.to raise_error(ActiveRecord::RecordNotDestroyed)
.and not_change { Ci::DeletedObject.count }.from(0)
end
end
end
context 'when exclusive lease has already been taken by the other instance' do
let!(:artifact) { create(:ci_job_artifact, :expired, job: job, locked: job.pipeline.locked) }
before do
stub_exclusive_lease_taken(described_class::EXCLUSIVE_LOCK_KEY, timeout: described_class::LOCK_TIMEOUT)
end
it 'raises an error and does not start destroying' do
expect { subject }.to raise_error(Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError)
.and not_change { Ci::JobArtifact.count }.from(1)
end
end
context 'with a second artifact and batch size of 1' do
let(:second_job) { create(:ci_build, :success, pipeline: pipeline) }
let!(:second_artifact) { create(:ci_job_artifact, :archive, expire_at: 1.day.ago, job: second_job, locked: job.pipeline.locked) }
let!(:artifact) { create(:ci_job_artifact, :expired, job: job, locked: job.pipeline.locked) }
before do
stub_const("#{described_class}::BATCH_SIZE", 1)
end
context 'when timeout happens' do
before do
stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds)
end
it 'destroys one artifact' do
expect { subject }.to change { Ci::JobArtifact.count }.by(-1)
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(1)
end
end
context 'when loop reached loop limit' do
before do
stub_const("#{described_class}::LOOP_LIMIT", 1)
end
it 'destroys one artifact' do
expect { subject }.to change { Ci::JobArtifact.count }.by(-1)
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(1)
end
end
      context 'when the number of artifacts is greater than the batch size' do
it 'destroys all expired artifacts' do
expect { subject }.to change { Ci::JobArtifact.count }.by(-2)
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(2)
end
end
end
context 'when there are no artifacts' do
it 'does not raise error' do
expect { subject }.not_to raise_error
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq(0)
end
end
context 'when some artifacts are locked' do
let!(:artifact) { create(:ci_job_artifact, :expired, job: job, locked: job.pipeline.locked) }
let!(:locked_artifact) { create(:ci_job_artifact, :expired, job: locked_job, locked: locked_job.pipeline.locked) }
it 'destroys only unlocked artifacts' do
expect { subject }.to change { Ci::JobArtifact.count }.by(-1)
expect(locked_artifact).to be_persisted
end
end
context 'when some artifacts are trace' do
let!(:artifact) { create(:ci_job_artifact, :expired, job: job, locked: job.pipeline.locked) }
let!(:trace_artifact) { create(:ci_job_artifact, :trace, :expired, job: job, locked: job.pipeline.locked) }
      it 'destroys only non-trace artifacts' do
expect { subject }.to change { Ci::JobArtifact.count }.by(-1)
expect(trace_artifact).to be_persisted
end
end
context 'when all artifacts are locked' do
let!(:artifact) { create(:ci_job_artifact, :expired, job: locked_job, locked: locked_job.pipeline.locked) }
it 'destroys no artifacts' do
expect { subject }.to change { Ci::JobArtifact.count }.by(0)
end
end
end
end
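# A minimal usage sketch; the run is guarded by an exclusive lease and returns
# the number of removed artifacts:
#
#   Ci::JobArtifacts::DestroyAllExpiredService.new.execute # => e.g. 2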
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class UpdateUnknownLockedStatusService
include ::Gitlab::ExclusiveLeaseHelpers
include ::Gitlab::LoopHelpers
BATCH_SIZE = 100
LOOP_TIMEOUT = 5.minutes
LOOP_LIMIT = 100
LARGE_LOOP_LIMIT = 500
EXCLUSIVE_LOCK_KEY = 'unknown_status_job_artifacts:update:lock'
LOCK_TIMEOUT = 6.minutes
def initialize
@removed_count = 0
@locked_count = 0
@start_at = Time.current
@loop_limit = Feature.enabled?(:ci_job_artifacts_backlog_large_loop_limit) ? LARGE_LOOP_LIMIT : LOOP_LIMIT
end
def execute
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
update_locked_status_on_unknown_artifacts
end
{ removed: @removed_count, locked: @locked_count }
end
private
def update_locked_status_on_unknown_artifacts
loop_until(timeout: LOOP_TIMEOUT, limit: @loop_limit) do
unknown_status_build_ids = safely_ordered_ci_job_artifacts_locked_unknown_relation.pluck_job_id.uniq
locked_pipe_build_ids = ::Ci::Build
.with_pipeline_locked_artifacts
.id_in(unknown_status_build_ids)
.pluck_primary_key
@locked_count += update_unknown_artifacts(locked_pipe_build_ids, Ci::JobArtifact.lockeds[:artifacts_locked])
unlocked_pipe_build_ids = unknown_status_build_ids - locked_pipe_build_ids
service_response = batch_destroy_artifacts(unlocked_pipe_build_ids)
@removed_count += service_response[:destroyed_artifacts_count]
end
end
def update_unknown_artifacts(build_ids, locked_value)
return 0 unless build_ids.any?
expired_locked_unknown_artifacts.for_job_ids(build_ids).update_all(locked: locked_value)
end
def batch_destroy_artifacts(build_ids)
deleteable_artifacts_relation =
if build_ids.any?
expired_locked_unknown_artifacts.for_job_ids(build_ids)
else
Ci::JobArtifact.none
end
Ci::JobArtifacts::DestroyBatchService.new(deleteable_artifacts_relation).execute
end
def expired_locked_unknown_artifacts
# UPDATE queries perform better without the specific order and limit
# https://gitlab.com/gitlab-org/gitlab/-/merge_requests/76509#note_891260455
Ci::JobArtifact.expired_before(@start_at).artifact_unknown
end
def safely_ordered_ci_job_artifacts_locked_unknown_relation
# Adding the ORDER and LIMIT improves performance when we don't have build_id
expired_locked_unknown_artifacts.limit(BATCH_SIZE).order_expired_asc
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::UpdateUnknownLockedStatusService, :clean_gitlab_redis_shared_state,
feature_category: :build_artifacts do
include ExclusiveLeaseHelpers
let(:service) { described_class.new }
describe '.execute' do
subject { service.execute }
let_it_be(:locked_pipeline) { create(:ci_pipeline, :artifacts_locked) }
let_it_be(:pipeline) { create(:ci_pipeline, :unlocked) }
let_it_be(:locked_job) { create(:ci_build, :success, pipeline: locked_pipeline) }
let_it_be(:job) { create(:ci_build, :success, pipeline: pipeline) }
let!(:unknown_unlocked_artifact) do
create(:ci_job_artifact, :junit, expire_at: 1.hour.ago, job: job, locked: Ci::JobArtifact.lockeds[:unknown])
end
let!(:unknown_locked_artifact) do
create(:ci_job_artifact, :lsif,
expire_at: 1.day.ago,
job: locked_job,
locked: Ci::JobArtifact.lockeds[:unknown]
)
end
let!(:unlocked_artifact) do
create(:ci_job_artifact, :archive, expire_at: 1.hour.ago, job: job, locked: Ci::JobArtifact.lockeds[:unlocked])
end
let!(:locked_artifact) do
create(:ci_job_artifact, :sast, :raw,
expire_at: 1.day.ago,
job: locked_job,
locked: Ci::JobArtifact.lockeds[:artifacts_locked]
)
end
context 'when artifacts are expired' do
it 'sets artifact_locked when the pipeline is locked' do
expect { service.execute }
.to change { unknown_locked_artifact.reload.locked }.from('unknown').to('artifacts_locked')
.and not_change { Ci::JobArtifact.exists?(locked_artifact.id) }
end
it 'destroys the artifact when the pipeline is unlocked' do
expect { subject }.to change { Ci::JobArtifact.exists?(unknown_unlocked_artifact.id) }.from(true).to(false)
end
it 'does not update ci_job_artifact rows with known locked values' do
expect { service.execute }
.to not_change(locked_artifact, :attributes)
.and not_change { Ci::JobArtifact.exists?(locked_artifact.id) }
.and not_change(unlocked_artifact, :attributes)
.and not_change { Ci::JobArtifact.exists?(unlocked_artifact.id) }
end
it 'logs the counts of affected artifacts' do
expect(subject).to eq({ removed: 1, locked: 1 })
end
end
context 'in a single iteration' do
before do
stub_const("#{described_class}::BATCH_SIZE", 1)
end
context 'due to the LOOP_TIMEOUT' do
before do
stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds)
end
it 'affects the earliest expired artifact first' do
subject
expect(unknown_locked_artifact.reload.locked).to eq('artifacts_locked')
expect(unknown_unlocked_artifact.reload.locked).to eq('unknown')
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq({ removed: 0, locked: 1 })
end
end
context 'due to @loop_limit' do
before do
stub_const("#{described_class}::LARGE_LOOP_LIMIT", 1)
end
it 'affects the earliest expired artifact first' do
subject
expect(unknown_locked_artifact.reload.locked).to eq('artifacts_locked')
expect(unknown_unlocked_artifact.reload.locked).to eq('unknown')
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq({ removed: 0, locked: 1 })
end
end
end
context 'when artifact is not expired' do
let!(:unknown_unlocked_artifact) do
create(:ci_job_artifact, :junit,
expire_at: 1.year.from_now,
job: job,
locked: Ci::JobArtifact.lockeds[:unknown]
)
end
it 'does not change the locked status' do
expect { service.execute }.not_to change { unknown_unlocked_artifact.locked }
expect(Ci::JobArtifact.exists?(unknown_unlocked_artifact.id)).to eq(true)
end
end
context 'when exclusive lease has already been taken by the other instance' do
before do
stub_exclusive_lease_taken(described_class::EXCLUSIVE_LOCK_KEY, timeout: described_class::LOCK_TIMEOUT)
end
it 'raises an error' do
expect { subject }.to raise_error(Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError)
end
end
context 'when there are no unknown status artifacts' do
before do
Ci::JobArtifact.update_all(locked: :unlocked)
end
it 'does not raise error' do
expect { subject }.not_to raise_error
end
it 'reports the number of destroyed artifacts' do
is_expected.to eq({ removed: 0, locked: 0 })
end
end
end
end
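The service also branches on the `ci_job_artifacts_backlog_large_loop_limit` feature flag when choosing its loop limit in `#initialize`. A minimal sketch of a spec that could pin that down, using GitLab's `stub_feature_flags` helper; the `instance_variable_get` peek is an illustration-only assumption, since the limit is not exposed publicly:
```ruby
context 'when the large loop limit flag is disabled' do
  before do
    stub_feature_flags(ci_job_artifacts_backlog_large_loop_limit: false)
  end

  it 'falls back to the standard LOOP_LIMIT' do
    # @loop_limit is chosen in #initialize, so build the service after stubbing
    service = described_class.new

    expect(service.instance_variable_get(:@loop_limit)).to eq(described_class::LOOP_LIMIT)
  end
end
```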
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class BulkDeleteByProjectService
include BaseServiceUtility
JOB_ARTIFACTS_COUNT_LIMIT = 50
def initialize(job_artifact_ids:, project:, current_user:)
@job_artifact_ids = job_artifact_ids
@project = project
@current_user = current_user
end
def execute
if exceeds_limits?
return ServiceResponse.error(
message: "Can only delete up to #{JOB_ARTIFACTS_COUNT_LIMIT} job artifacts per call"
)
end
find_result = find_artifacts
return ServiceResponse.error(message: find_result[:error_message]) if find_result[:error_message]
@job_artifact_scope = find_result[:scope]
unless all_job_artifacts_belong_to_project?
return ServiceResponse.error(message: 'Not all artifacts belong to requested project')
end
result = Ci::JobArtifacts::DestroyBatchService.new(job_artifact_scope).execute
destroyed_artifacts_count = result.fetch(:destroyed_artifacts_count)
destroyed_ids = result.fetch(:destroyed_ids)
ServiceResponse.success(
payload: {
destroyed_count: destroyed_artifacts_count,
destroyed_ids: destroyed_ids,
errors: []
})
end
private
def find_artifacts
job_artifacts = ::Ci::JobArtifact.id_in(job_artifact_ids)
error_message = nil
if job_artifacts.count != job_artifact_ids.count
not_found_artifacts = job_artifact_ids - job_artifacts.map(&:id)
error_message = "Artifacts (#{not_found_artifacts.join(',')}) not found"
end
{ scope: job_artifacts, error_message: error_message }
end
def exceeds_limits?
job_artifact_ids.count > JOB_ARTIFACTS_COUNT_LIMIT
end
def all_job_artifacts_belong_to_project?
# rubocop:disable CodeReuse/ActiveRecord
job_artifact_scope.pluck(:project_id).all?(project.id)
# rubocop:enable CodeReuse/ActiveRecord
end
attr_reader :job_artifact_ids, :job_artifact_scope, :current_user, :project
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::JobArtifacts::BulkDeleteByProjectService, "#execute", feature_category: :build_artifacts do
subject(:execute) do
described_class.new(
job_artifact_ids: job_artifact_ids,
current_user: current_user,
project: project).execute
end
let_it_be(:current_user) { create(:user) }
let_it_be(:build, reload: true) do
create(:ci_build, :artifacts, :trace_artifact, user: current_user)
end
let_it_be(:project) { build.project }
let_it_be(:job_artifact_ids) { build.job_artifacts.map(&:id) }
describe '#execute' do
context 'when number of artifacts exceeds limits to delete' do
let_it_be(:second_build, reload: true) do
create(:ci_build, :artifacts, :trace_artifact, user: current_user, project: project)
end
let_it_be(:job_artifact_ids) { ::Ci::JobArtifact.all.map(&:id) }
before do
project.add_maintainer(current_user)
stub_const("#{described_class}::JOB_ARTIFACTS_COUNT_LIMIT", 1)
end
it 'fails to destroy' do
result = execute
expect(result).to be_error
expect(result[:message]).to eq('Can only delete up to 1 job artifacts per call')
end
end
context 'when requesting artifacts that do not exist' do
let_it_be(:deleted_build, reload: true) do
create(:ci_build, :artifacts, :trace_artifact, user: current_user, project: project)
end
let_it_be(:deleted_job_artifacts) { deleted_build.job_artifacts }
let_it_be(:job_artifact_ids) { ::Ci::JobArtifact.all.map(&:id) }
before do
project.add_maintainer(current_user)
deleted_job_artifacts.each(&:destroy!)
end
it 'fails to destroy' do
result = execute
expect(result).to be_error
expect(result[:message]).to eq("Artifacts (#{deleted_job_artifacts.map(&:id).join(',')}) not found")
end
end
context 'when maintainer has access to the project' do
before do
project.add_maintainer(current_user)
end
it 'is successful' do
result = execute
expect(result).to be_success
expect(result.payload).to eq(
{
destroyed_count: job_artifact_ids.count,
destroyed_ids: job_artifact_ids,
errors: []
}
)
expect(::Ci::JobArtifact.where(id: job_artifact_ids).count).to eq(0)
end
context 'and partially owns artifacts' do
let_it_be(:orphan_artifact) { create(:ci_job_artifact, :archive) }
let_it_be(:orphan_artifact_id) { orphan_artifact.id }
let_it_be(:owned_artifacts_ids) { build.job_artifacts.erasable.map(&:id) }
let_it_be(:job_artifact_ids) { [orphan_artifact_id] + owned_artifacts_ids }
it 'fails to destroy' do
result = execute
expect(result).to be_error
expect(result[:message]).to be('Not all artifacts belong to requested project')
expect(::Ci::JobArtifact.where(id: job_artifact_ids).count).to eq(3)
end
end
context 'and requesting all artifacts from a different project' do
let_it_be(:different_project_artifact) { create(:ci_job_artifact, :archive) }
let_it_be(:job_artifact_ids) { [different_project_artifact] }
let_it_be(:different_build, reload: true) do
create(:ci_build, :artifacts, :trace_artifact, user: current_user)
end
let_it_be(:different_project) { different_build.project }
before do
different_project.add_maintainer(current_user)
end
it 'returns an error' do
result = execute
expect(result).to be_error
expect(result[:message]).to be('Not all artifacts belong to requested project')
expect(::Ci::JobArtifact.where(id: job_artifact_ids).count).to eq(job_artifact_ids.count)
end
end
end
end
end
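The ownership check in the class above (`pluck(:project_id).all?(project.id)`) relies on `Enumerable#all?` with a pattern argument (Ruby 2.5+), which compares each element via `===`. A standalone illustration:
```ruby
[7, 7, 7].all?(7) # => true
[7, 7, 9].all?(7) # => false, so at least one artifact belongs to another project
```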
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class CreateService < ::BaseService
include Gitlab::Utils::UsageData
LSIF_ARTIFACT_TYPE = 'lsif'
OBJECT_STORAGE_ERRORS = [
Errno::EIO,
Google::Apis::ServerError,
Signet::RemoteServerError
].freeze
def initialize(job)
@job = job
@project = job.project
@pipeline = job.pipeline
end
def authorize(artifact_type:, filesize: nil)
result = validate_requirements(artifact_type: artifact_type, filesize: filesize)
return result unless result[:status] == :success
headers = JobArtifactUploader.workhorse_authorize(
has_length: false,
maximum_size: max_size(artifact_type),
use_final_store_path: true,
final_store_path_root_id: project.id
)
if lsif?(artifact_type)
headers[:ProcessLsif] = true
track_usage_event('i_source_code_code_intelligence', project.id)
end
success(headers: headers)
end
def execute(artifacts_file, params, metadata_file: nil)
result = validate_requirements(artifact_type: params[:artifact_type], filesize: artifacts_file.size)
return result unless result[:status] == :success
return success if sha256_matches_existing_artifact?(params[:artifact_type], artifacts_file)
build_result = build_artifact(artifacts_file, params, metadata_file)
return build_result unless build_result[:status] == :success
artifact = build_result[:artifact]
artifact_metadata = build_result[:artifact_metadata]
track_artifact_uploader(artifact)
parse_result = parse_artifact(artifact)
return parse_result unless parse_result[:status] == :success
persist_artifact(artifact, artifact_metadata)
end
private
attr_reader :job, :project, :pipeline
def validate_requirements(artifact_type:, filesize:)
return too_large_error if too_large?(artifact_type, filesize)
success
end
def too_large?(type, size)
size > max_size(type) if size
end
def lsif?(type)
type == LSIF_ARTIFACT_TYPE
end
def max_size(type)
Ci::JobArtifact.max_artifact_size(type: type, project: project)
end
def too_large_error
error('file size has reached maximum size limit', :payload_too_large)
end
def build_artifact(artifacts_file, params, metadata_file)
artifact_attributes = {
job: job,
project: project,
expire_in: expire_in(params),
accessibility: accessibility(params),
locked: pipeline.locked
}
file_attributes = {
file_type: params[:artifact_type],
file_format: params[:artifact_format],
file_sha256: artifacts_file.sha256,
file: artifacts_file
}
artifact = Ci::JobArtifact.new(artifact_attributes.merge(file_attributes))
artifact_metadata = build_metadata_artifact(artifact, metadata_file) if metadata_file
success(artifact: artifact, artifact_metadata: artifact_metadata)
end
def build_metadata_artifact(job_artifact, metadata_file)
Ci::JobArtifact.new(
job: job_artifact.job,
project: job_artifact.project,
expire_at: job_artifact.expire_at,
locked: job_artifact.locked,
file: metadata_file,
file_type: :metadata,
file_format: :gzip,
file_sha256: metadata_file.sha256,
accessibility: job_artifact.accessibility
)
end
def expire_in(params)
params['expire_in'] || Gitlab::CurrentSettings.current_application_settings.default_artifacts_expire_in
end
def accessibility(params)
accessibility = params[:accessibility]
return :public if Feature.disabled?(:non_public_artifacts, project, type: :development)
return accessibility if accessibility.present?
job.artifact_is_public_in_config? ? :public : :private
end
def parse_artifact(artifact)
case artifact.file_type
when 'dotenv' then parse_dotenv_artifact(artifact)
when 'annotations' then parse_annotations_artifact(artifact)
else success
end
end
def persist_artifact(artifact, artifact_metadata)
job.transaction do
# NOTE: The `artifacts_expire_at` column is already deprecated and to be removed in the near future.
# Running it first because in migrations we lock the `ci_builds` table
# first and then the others. This reduces the chances of deadlocks.
job.update_column(:artifacts_expire_at, artifact.expire_at)
artifact.save!
artifact_metadata&.save!
end
success(artifact: artifact)
rescue ActiveRecord::RecordNotUnique => error
track_exception(error, artifact.file_type)
error('another artifact of the same type already exists', :bad_request)
rescue *OBJECT_STORAGE_ERRORS => error
track_exception(error, artifact.file_type)
error(error.message, :service_unavailable)
rescue StandardError => error
track_exception(error, artifact.file_type)
error(error.message, :bad_request)
end
def sha256_matches_existing_artifact?(artifact_type, artifacts_file)
existing_artifact = job.job_artifacts.find_by_file_type(artifact_type)
return false unless existing_artifact
existing_artifact.file_sha256 == artifacts_file.sha256
end
def track_exception(error, artifact_type)
Gitlab::ErrorTracking.track_exception(
error,
job_id: job.id,
project_id: job.project_id,
uploading_type: artifact_type
)
end
def track_artifact_uploader(_artifact)
# Overridden in EE
end
def parse_dotenv_artifact(artifact)
Ci::ParseDotenvArtifactService.new(project, current_user).execute(artifact)
end
def parse_annotations_artifact(artifact)
Ci::ParseAnnotationsArtifactService.new(project, current_user).execute(artifact)
end
end
end
end
Ci::JobArtifacts::CreateService.prepend_mod
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::CreateService, :clean_gitlab_redis_shared_state, feature_category: :build_artifacts do
include WorkhorseHelpers
include Gitlab::Utils::Gzip
let_it_be(:project) { create(:project) }
let(:service) { described_class.new(job) }
let(:job) { create(:ci_build, project: project) }
describe '#authorize', :aggregate_failures do
let(:artifact_type) { 'archive' }
let(:filesize) { nil }
subject(:authorize) { service.authorize(artifact_type: artifact_type, filesize: filesize) }
shared_examples_for 'handling lsif artifact' do
context 'when artifact is lsif' do
let(:artifact_type) { 'lsif' }
it 'includes ProcessLsif in the headers' do
expect(authorize[:headers][:ProcessLsif]).to eq(true)
end
end
end
shared_examples_for 'validating requirements' do
context 'when filesize is specified' do
let(:max_artifact_size) { 10 }
before do
allow(Ci::JobArtifact)
.to receive(:max_artifact_size)
.with(type: artifact_type, project: project)
.and_return(max_artifact_size)
end
context 'and filesize exceeds the limit' do
let(:filesize) { max_artifact_size + 1 }
it 'returns error' do
expect(authorize[:status]).to eq(:error)
end
end
context 'and filesize does not exceed the limit' do
let(:filesize) { max_artifact_size - 1 }
it 'returns success' do
expect(authorize[:status]).to eq(:success)
end
end
end
end
shared_examples_for 'uploading to temp location' do |store_type|
# We are not testing the entire headers here because this is fully tested
# in workhorse_authorize's spec. We just want to confirm that it indeed used the temp path
# by checking some indicators in the headers returned.
if store_type == :object_storage
it 'includes the authorize headers' do
expect(authorize[:status]).to eq(:success)
expect(authorize[:headers][:RemoteObject][:StoreURL]).to include(ObjectStorage::TMP_UPLOAD_PATH)
end
else
it 'includes the authorize headers' do
expect(authorize[:status]).to eq(:success)
expect(authorize[:headers][:TempPath]).to include(ObjectStorage::TMP_UPLOAD_PATH)
end
end
it_behaves_like 'handling lsif artifact'
it_behaves_like 'validating requirements'
end
context 'when object storage is enabled' do
context 'and direct upload is enabled' do
let(:final_store_path) { '12/34/abc-123' }
before do
stub_artifacts_object_storage(JobArtifactUploader, direct_upload: true)
allow(JobArtifactUploader)
.to receive(:generate_final_store_path)
.with(root_id: project.id)
.and_return(final_store_path)
end
it 'includes the authorize headers' do
expect(authorize[:status]).to eq(:success)
expect(authorize[:headers][:RemoteObject][:ID]).to eq(final_store_path)
# We are not testing the entire headers here because this is fully tested
# in workhorse_authorize's spec. We just want to confirm that it indeed used the final path
# by checking some indicators in the headers returned.
expect(authorize[:headers][:RemoteObject][:StoreURL])
.to include(final_store_path)
# We have to ensure to tell Workhorse to skip deleting the file after upload
# because we are uploading the file to its final location
expect(authorize[:headers][:RemoteObject][:SkipDelete]).to eq(true)
end
it_behaves_like 'handling lsif artifact'
it_behaves_like 'validating requirements'
end
context 'and direct upload is disabled' do
before do
stub_artifacts_object_storage(JobArtifactUploader, direct_upload: false)
end
it_behaves_like 'uploading to temp location', :local_storage
end
end
context 'when object storage is disabled' do
it_behaves_like 'uploading to temp location', :local_storage
end
end
describe '#execute' do
let(:artifacts_sha256) { '0' * 64 }
let(:metadata_file) { nil }
let(:params) do
{
'artifact_type' => 'archive',
'artifact_format' => 'zip'
}.with_indifferent_access
end
subject(:execute) { service.execute(artifacts_file, params, metadata_file: metadata_file) }
shared_examples_for 'handling accessibility' do
shared_examples 'public accessibility' do
it 'sets accessibility to public level' do
subject
expect(job.job_artifacts).not_to be_empty
expect(job.job_artifacts).to all be_public_accessibility
end
end
shared_examples 'private accessibility' do
it 'sets accessibility to private level' do
subject
expect(job.job_artifacts).not_to be_empty
expect(job.job_artifacts).to all be_private_accessibility
end
end
context 'when non_public_artifacts flag is disabled' do
before do
stub_feature_flags(non_public_artifacts: false)
end
it_behaves_like 'public accessibility'
end
context 'when non_public_artifacts flag is enabled' do
context 'and accessibility is defined in the params' do
context 'and is passed as private' do
before do
params.merge!('accessibility' => 'private')
end
it_behaves_like 'private accessibility'
end
context 'and is passed as public' do
before do
params.merge!('accessibility' => 'public')
end
it_behaves_like 'public accessibility'
end
end
context 'and accessibility is not defined in the params' do
context 'and job has no public artifacts defined in its CI config' do
it_behaves_like 'public accessibility'
end
context 'and job artifacts defined as private in the CI config' do
let(:job) { create(:ci_build, :with_private_artifacts_config, project: project) }
it_behaves_like 'private accessibility'
end
context 'and job artifacts defined as public in the CI config' do
let(:job) { create(:ci_build, :with_public_artifacts_config, project: project) }
it_behaves_like 'public accessibility'
end
end
end
context 'when accessibility passed as invalid value' do
before do
params.merge!('accessibility' => 'foo')
end
it 'fails with argument error' do
expect { execute }.to raise_error(ArgumentError, "'foo' is not a valid accessibility")
end
end
end
shared_examples_for 'handling metadata file' do
context 'when metadata file is also uploaded' do
let(:metadata_file) do
file_to_upload('spec/fixtures/ci_build_artifacts_metadata.gz', sha256: artifacts_sha256)
end
before do
stub_application_setting(default_artifacts_expire_in: '1 day')
end
it 'creates a new metadata job artifact' do
expect { execute }.to change { Ci::JobArtifact.where(file_type: :metadata).count }.by(1)
new_artifact = job.job_artifacts.last
expect(new_artifact.project).to eq(job.project)
expect(new_artifact.file).to be_present
expect(new_artifact.file_type).to eq('metadata')
expect(new_artifact.file_format).to eq('gzip')
expect(new_artifact.file_sha256).to eq(artifacts_sha256)
expect(new_artifact.locked).to eq(job.pipeline.locked)
end
it 'logs the created artifact and metadata' do
expect(Gitlab::Ci::Artifacts::Logger)
.to receive(:log_created)
.with(an_instance_of(Ci::JobArtifact)).twice
subject
end
it_behaves_like 'handling accessibility'
it 'sets expiration date according to application settings' do
expected_expire_at = 1.day.from_now
expect(execute).to match(a_hash_including(status: :success, artifact: anything))
archive_artifact, metadata_artifact = job.job_artifacts.last(2)
expect(job.artifacts_expire_at).to be_within(1.minute).of(expected_expire_at)
expect(archive_artifact.expire_at).to be_within(1.minute).of(expected_expire_at)
expect(metadata_artifact.expire_at).to be_within(1.minute).of(expected_expire_at)
end
context 'when expire_in params is set to a specific value' do
before do
params.merge!('expire_in' => '2 hours')
end
it 'sets expiration date according to the parameter' do
expected_expire_at = 2.hours.from_now
expect(execute).to match(a_hash_including(status: :success, artifact: anything))
archive_artifact, metadata_artifact = job.job_artifacts.last(2)
expect(job.artifacts_expire_at).to be_within(1.minute).of(expected_expire_at)
expect(archive_artifact.expire_at).to be_within(1.minute).of(expected_expire_at)
expect(metadata_artifact.expire_at).to be_within(1.minute).of(expected_expire_at)
end
end
context 'when expire_in params is set to `never`' do
before do
params.merge!('expire_in' => 'never')
end
it 'sets expiration date according to the parameter' do
expected_expire_at = nil
expect(execute).to be_truthy
archive_artifact, metadata_artifact = job.job_artifacts.last(2)
expect(job.artifacts_expire_at).to eq(expected_expire_at)
expect(archive_artifact.expire_at).to eq(expected_expire_at)
expect(metadata_artifact.expire_at).to eq(expected_expire_at)
end
end
end
end
shared_examples_for 'handling dotenv' do |storage_type|
context 'when artifact type is dotenv' do
let(:params) do
{
'artifact_type' => 'dotenv',
'artifact_format' => 'gzip'
}.with_indifferent_access
end
if storage_type == :object_storage
let(:object_body) { File.read('spec/fixtures/build.env.gz') }
let(:upload_filename) { 'build.env.gz' }
before do
stub_request(:get, %r{s3.amazonaws.com/#{remote_path}})
.to_return(status: 200, body: File.read('spec/fixtures/build.env.gz'))
end
else
let(:artifacts_file) do
file_to_upload('spec/fixtures/build.env.gz', sha256: artifacts_sha256)
end
end
it 'calls parse service' do
expect_next_instance_of(Ci::ParseDotenvArtifactService) do |service|
expect(service).to receive(:execute).once.and_call_original
end
expect(execute[:status]).to eq(:success)
expect(job.job_variables.as_json(only: [:key, :value, :source])).to contain_exactly(
hash_including('key' => 'KEY1', 'value' => 'VAR1', 'source' => 'dotenv'),
hash_including('key' => 'KEY2', 'value' => 'VAR2', 'source' => 'dotenv'))
end
end
end
shared_examples_for 'handling annotations' do |storage_type|
context 'when artifact type is annotations' do
let(:params) do
{
'artifact_type' => 'annotations',
'artifact_format' => 'gzip'
}.with_indifferent_access
end
if storage_type == :object_storage
let(:object_body) { File.read('spec/fixtures/gl-annotations.json.gz') }
let(:upload_filename) { 'gl-annotations.json.gz' }
before do
stub_request(:get, %r{s3.amazonaws.com/#{remote_path}})
.to_return(status: 200, body: File.read('spec/fixtures/gl-annotations.json.gz'))
end
else
let(:artifacts_file) do
file_to_upload('spec/fixtures/gl-annotations.json.gz', sha256: artifacts_sha256)
end
end
it 'calls parse service' do
expect_next_instance_of(Ci::ParseAnnotationsArtifactService) do |service|
expect(service).to receive(:execute).once.and_call_original
end
expect(execute[:status]).to eq(:success)
expect(job.job_annotations.as_json).to contain_exactly(
hash_including('name' => 'external_links', 'data' => [
hash_including('external_link' => hash_including('label' => 'URL 1', 'url' => 'https://url1.example.com/')),
hash_including('external_link' => hash_including('label' => 'URL 2', 'url' => 'https://url2.example.com/'))
])
)
end
end
end
shared_examples_for 'handling object storage errors' do
shared_examples 'rescues object storage error' do |klass, message, expected_message|
it "handles #{klass}" do
allow_next_instance_of(JobArtifactUploader) do |uploader|
allow(uploader).to receive(:store!).and_raise(klass, message)
end
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.and_call_original
expect(execute).to match(
a_hash_including(
http_status: :service_unavailable,
message: expected_message || message,
status: :error))
end
end
it_behaves_like 'rescues object storage error',
Errno::EIO, 'some/path', 'Input/output error - some/path'
it_behaves_like 'rescues object storage error',
Google::Apis::ServerError, 'Server error'
it_behaves_like 'rescues object storage error',
Signet::RemoteServerError, 'The service is currently unavailable'
end
shared_examples_for 'validating requirements' do
context 'when filesize is specified' do
let(:max_artifact_size) { 10 }
before do
allow(Ci::JobArtifact)
.to receive(:max_artifact_size)
.with(type: 'archive', project: project)
.and_return(max_artifact_size)
allow(artifacts_file).to receive(:size).and_return(filesize)
end
context 'and filesize exceeds the limit' do
let(:filesize) { max_artifact_size + 1 }
it 'returns error' do
expect(execute[:status]).to eq(:error)
end
end
context 'and filesize does not exceed the limit' do
let(:filesize) { max_artifact_size - 1 }
it 'returns success' do
expect(execute[:status]).to eq(:success)
end
end
end
end
shared_examples_for 'handling existing artifact' do
context 'when job already has an artifact of the same file type' do
let!(:existing_artifact) do
create(:ci_job_artifact, params[:artifact_type], file_sha256: existing_sha256, job: job)
end
context 'when sha256 of uploading artifact is the same of the existing one' do
let(:existing_sha256) { artifacts_sha256 }
it 'ignores the changes' do
expect { execute }.not_to change { Ci::JobArtifact.count }
expect(execute).to match(a_hash_including(status: :success))
end
end
context 'when sha256 of uploading artifact is different than the existing one' do
let(:existing_sha256) { '1' * 64 }
it 'returns error status' do
expect(Gitlab::ErrorTracking).to receive(:track_exception).and_call_original
expect { execute }.not_to change { Ci::JobArtifact.count }
expect(execute).to match(
a_hash_including(
http_status: :bad_request,
message: 'another artifact of the same type already exists',
status: :error
)
)
end
end
end
end
shared_examples_for 'logging artifact' do
it 'logs the created artifact' do
expect(Gitlab::Ci::Artifacts::Logger)
.to receive(:log_created)
.with(an_instance_of(Ci::JobArtifact))
execute
end
end
shared_examples_for 'handling uploads' do
context 'when artifacts file is uploaded' do
it 'creates a new job artifact' do
expect { execute }.to change { Ci::JobArtifact.count }.by(1)
new_artifact = execute[:artifact]
expect(new_artifact).to eq(job.job_artifacts.last)
expect(new_artifact.project).to eq(job.project)
expect(new_artifact.file.filename).to eq(artifacts_file.original_filename)
expect(new_artifact.file_identifier).to eq(artifacts_file.original_filename)
expect(new_artifact.file_type).to eq(params['artifact_type'])
expect(new_artifact.file_format).to eq(params['artifact_format'])
expect(new_artifact.file_sha256).to eq(artifacts_sha256)
expect(new_artifact.locked).to eq(job.pipeline.locked)
expect(new_artifact.size).to eq(artifacts_file.size)
expect(execute[:status]).to eq(:success)
end
it_behaves_like 'handling accessibility'
it_behaves_like 'handling metadata file'
it_behaves_like 'handling partitioning'
it_behaves_like 'logging artifact'
end
end
shared_examples_for 'handling partitioning' do
context 'with job partitioned', :ci_partitionable do
let(:pipeline) { create(:ci_pipeline, project: project, partition_id: ci_testing_partition_id) }
let(:job) { create(:ci_build, pipeline: pipeline) }
it 'sets partition_id on artifacts' do
expect { execute }.to change { Ci::JobArtifact.count }
artifacts_partitions = job.job_artifacts.map(&:partition_id).uniq
expect(artifacts_partitions).to eq([ci_testing_partition_id])
end
end
end
context 'when object storage and direct upload is enabled' do
let(:fog_connection) { stub_artifacts_object_storage(JobArtifactUploader, direct_upload: true) }
let(:remote_path) { File.join(remote_store_path, remote_id) }
let(:object_body) { File.open('spec/fixtures/ci_build_artifacts.zip') }
let(:upload_filename) { 'artifacts.zip' }
let(:object) do
fog_connection.directories
.new(key: 'artifacts')
.files
.create( # rubocop:disable Rails/SaveBang
key: remote_path,
body: object_body
)
end
let(:artifacts_file) do
fog_to_uploaded_file(
object,
filename: upload_filename,
sha256: artifacts_sha256,
remote_id: remote_id
)
end
let(:remote_id) { 'generated-remote-id-12345' }
let(:remote_store_path) { ObjectStorage::TMP_UPLOAD_PATH }
it_behaves_like 'handling uploads'
it_behaves_like 'handling dotenv', :object_storage
it_behaves_like 'handling annotations', :object_storage
it_behaves_like 'handling object storage errors'
it_behaves_like 'validating requirements'
end
context 'when using local storage' do
let(:artifacts_file) do
file_to_upload('spec/fixtures/ci_build_artifacts.zip', sha256: artifacts_sha256)
end
it_behaves_like 'handling uploads'
it_behaves_like 'handling dotenv', :local_storage
it_behaves_like 'handling annotations', :local_storage
it_behaves_like 'validating requirements'
end
end
def file_to_upload(path, params = {})
upload = Tempfile.new('upload')
FileUtils.copy(path, upload.path)
# This is a workaround for https://github.com/docker/for-linux/issues/1015
FileUtils.touch(upload.path)
UploadedFile.new(upload.path, **params)
end
end
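One subtlety the spec works around: `#expire_in` in the class reads `params['expire_in']` with a string key, which is why the spec builds its params with `.with_indifferent_access`. With a plain symbol-keyed hash the lookup would return nil and silently fall back to the instance-wide default:
```ruby
params = { expire_in: '2 hours' }
params['expire_in']                          # => nil (plain Hash, symbol key)
params.with_indifferent_access['expire_in']  # => "2 hours"
```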
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class DeleteService
include BaseServiceUtility
def initialize(build)
@build = build
end
def execute
if build.project.refreshing_build_artifacts_size?
Gitlab::ProjectStatsRefreshConflictsLogger.warn_artifact_deletion_during_stats_refresh(
method: 'Ci::JobArtifacts::DeleteService#execute',
project_id: build.project_id
)
return ServiceResponse.error(
message: 'Action temporarily disabled. The project this job belongs to is undergoing stats refresh.',
reason: :project_stats_refresh
)
end
result = Ci::JobArtifacts::DestroyBatchService.new(build.job_artifacts.erasable).execute
if result.fetch(:status) == :success
ServiceResponse.success(payload:
{
destroyed_artifacts_count: result.fetch(:destroyed_artifacts_count)
})
else
ServiceResponse.error(message: result.fetch(:message))
end
end
private
attr_reader :build
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::DeleteService, feature_category: :build_artifacts do
let_it_be(:build, reload: true) do
create(:ci_build, :artifacts, :trace_artifact, artifacts_expire_at: 100.days.from_now)
end
subject(:service) { described_class.new(build) }
describe '#execute' do
it 'is successful' do
result = service.execute
expect(result).to be_success
expect(result[:destroyed_artifacts_count]).to be(2)
end
it 'deletes erasable artifacts' do
expect { service.execute }.to change { build.job_artifacts.erasable.count }.from(2).to(0)
end
it 'does not delete trace' do
expect { service.execute }.not_to change { build.has_trace? }.from(true)
end
context 'when project is undergoing stats refresh' do
before do
allow(build.project).to receive(:refreshing_build_artifacts_size?).and_return(true)
end
it 'logs a warning' do
expect(Gitlab::ProjectStatsRefreshConflictsLogger)
.to receive(:warn_artifact_deletion_during_stats_refresh)
.with(method: 'Ci::JobArtifacts::DeleteService#execute', project_id: build.project_id)
service.execute
end
it 'returns an error response with the correct message and reason' do
result = service.execute
expect(result).to be_error
expect(result[:message]).to be('Action temporarily disabled. ' \
'The project this job belongs to is undergoing stats refresh.')
expect(result[:reason]).to be(:project_stats_refresh)
end
end
context 'when an error response is received from DestroyBatchService' do
before do
allow_next_instance_of(Ci::JobArtifacts::DestroyBatchService) do |service|
allow(service).to receive(:execute).and_return({ status: :error, message: 'something went wrong' })
end
end
it 'returns an error response with the correct message' do
result = service.execute
expect(result).to be_error
expect(result[:message]).to be('something went wrong')
end
end
end
end
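The assertions above index the response directly (for example `result[:destroyed_artifacts_count]`). In GitLab, `ServiceResponse#[]` reads from `#to_h`, which merges the payload with the response's own attributes, so both forms below return the same value:
```ruby
result = ServiceResponse.success(payload: { destroyed_artifacts_count: 2 })

result[:destroyed_artifacts_count]          # => 2
result.payload[:destroyed_artifacts_count]  # => 2
```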
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module JobArtifacts
class ExpireProjectBuildArtifactsService
BATCH_SIZE = 1000
def initialize(project_id, expiry_time)
@project_id = project_id
@expiry_time = expiry_time
end
# rubocop:disable CodeReuse/ActiveRecord
def execute
scope = Ci::JobArtifact.for_project(project_id).order(:id)
file_type_values = Ci::JobArtifact.erasable_file_types.map { |file_type| [Ci::JobArtifact.file_types[file_type]] }
from_sql = Arel::Nodes::Grouping.new(Arel::Nodes::ValuesList.new(file_type_values)).as('file_types (file_type)').to_sql
array_scope = Ci::JobArtifact.from(from_sql).select(:file_type)
array_mapping_scope = -> (file_type_expression) { Ci::JobArtifact.where(Ci::JobArtifact.arel_table[:file_type].eq(file_type_expression)) }
Gitlab::Pagination::Keyset::Iterator
.new(scope: scope, in_operator_optimization_options: { array_scope: array_scope, array_mapping_scope: array_mapping_scope })
.each_batch(of: BATCH_SIZE) do |batch|
ids = batch.reselect!(:id).to_a.map(&:id)
Ci::JobArtifact.unlocked.where(id: ids).update_all(locked: Ci::JobArtifact.lockeds[:unlocked], expire_at: expiry_time)
end
end
# rubocop:enable CodeReuse/ActiveRecord
private
attr_reader :project_id, :expiry_time
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::ExpireProjectBuildArtifactsService, feature_category: :build_artifacts do
let_it_be(:project) { create(:project) }
let_it_be(:pipeline, reload: true) { create(:ci_pipeline, :unlocked, project: project) }
let(:expiry_time) { Time.current }
RSpec::Matchers.define :have_locked_status do |expected_status|
match do |job_artifacts|
predicate = "#{expected_status}?".to_sym
job_artifacts.all? { |artifact| artifact.__send__(predicate) }
end
end
RSpec::Matchers.define :expire_at do |expected_expiry|
match do |job_artifacts|
job_artifacts.all? { |artifact| artifact.expire_at.to_i == expected_expiry.to_i }
end
end
RSpec::Matchers.define :have_no_expiry do
match do |job_artifacts|
job_artifacts.all? { |artifact| artifact.expire_at.nil? }
end
end
describe '#execute' do
subject(:execute) { described_class.new(project.id, expiry_time).execute }
context 'with job containing erasable artifacts' do
let_it_be(:job, reload: true) { create(:ci_build, :erasable, pipeline: pipeline) }
it 'unlocks erasable job artifacts' do
execute
expect(job.job_artifacts).to have_locked_status(:artifact_unlocked)
end
it 'expires erasable job artifacts' do
execute
expect(job.job_artifacts).to expire_at(expiry_time)
end
end
context 'with job containing trace artifacts' do
let_it_be(:job, reload: true) { create(:ci_build, :trace_artifact, pipeline: pipeline) }
it 'does not unlock trace artifacts' do
execute
expect(job.job_artifacts).to have_locked_status(:artifact_unknown)
end
it 'does not expire trace artifacts' do
execute
expect(job.job_artifacts).to have_no_expiry
end
end
context 'with job from artifact locked pipeline' do
let_it_be(:job, reload: true) { create(:ci_build, pipeline: pipeline) }
let_it_be(:locked_artifact, reload: true) { create(:ci_job_artifact, :locked, job: job) }
before do
pipeline.artifacts_locked!
end
it 'does not unlock locked artifacts' do
execute
expect(job.job_artifacts).to have_locked_status(:artifact_artifacts_locked)
end
it 'does not expire locked artifacts' do
execute
expect(job.job_artifacts).to have_no_expiry
end
end
context 'with job containing both erasable and trace artifacts' do
let_it_be(:job, reload: true) { create(:ci_build, pipeline: pipeline) }
let_it_be(:erasable_artifact, reload: true) { create(:ci_job_artifact, :archive, job: job) }
let_it_be(:trace_artifact, reload: true) { create(:ci_job_artifact, :trace, job: job) }
it 'unlocks erasable artifacts' do
execute
expect(erasable_artifact.artifact_unlocked?).to be_truthy
end
it 'expires erasable artifacts' do
execute
expect(erasable_artifact.expire_at.to_i).to eq(expiry_time.to_i)
end
it 'does not unlock trace artifacts' do
execute
expect(trace_artifact.artifact_unlocked?).to be_falsey
end
it 'does not expire trace artifacts' do
execute
expect(trace_artifact.expire_at).to be_nil
end
end
context 'with multiple pipelines' do
let_it_be(:job, reload: true) { create(:ci_build, :erasable, pipeline: pipeline) }
let_it_be(:pipeline2, reload: true) { create(:ci_pipeline, :unlocked, project: project) }
let_it_be(:job2, reload: true) { create(:ci_build, :erasable, pipeline: pipeline2) }
it 'unlocks artifacts across pipelines' do
execute
expect(job.job_artifacts).to have_locked_status(:artifact_unlocked)
expect(job2.job_artifacts).to have_locked_status(:artifact_unlocked)
end
it 'expires artifacts across pipelines' do
execute
expect(job.job_artifacts).to expire_at(expiry_time)
expect(job2.job_artifacts).to expire_at(expiry_time)
end
end
context 'with artifacts belonging to another project' do
let_it_be(:job, reload: true) { create(:ci_build, :erasable, pipeline: pipeline) }
let_it_be(:another_project, reload: true) { create(:project) }
let_it_be(:another_pipeline, reload: true) { create(:ci_pipeline, project: another_project) }
let_it_be(:another_job, reload: true) { create(:ci_build, :erasable, pipeline: another_pipeline) }
it 'does not unlock erasable artifacts in other projects' do
execute
expect(another_job.job_artifacts).to have_locked_status(:artifact_unknown)
end
it 'does not expire erasable artifacts in other projects' do
execute
expect(another_job.job_artifacts).to have_no_expiry
end
end
end
end
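The `(VALUES ...)` construction in the class is plain Arel and can be sanity-checked in isolation; the file type IDs below are illustrative, and the exact SQL rendering may vary slightly by Rails version:
```ruby
file_type_values = [[1], [2], [3]]

sql = Arel::Nodes::Grouping.new(Arel::Nodes::ValuesList.new(file_type_values))
        .as('file_types (file_type)').to_sql
# => roughly "(VALUES (1), (2), (3)) AS file_types (file_type)"
```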
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineSchedules
class CalculateNextRunService < BaseService
include Gitlab::Utils::StrongMemoize
def execute(schedule, fallback_method:)
@schedule = schedule
return fallback_method.call unless plan_cron&.cron_valid?
now = Time.zone.now
plan_min_run = plan_cron.next_time_from(now)
schedule_next_run = schedule_cron.next_time_from(now)
return schedule_next_run if worker_cron.match?(schedule_next_run) && plan_min_run <= schedule_next_run
plan_next_run = plan_cron.next_time_from(schedule_next_run)
return plan_next_run if worker_cron.match?(plan_next_run)
worker_next_run = worker_cron.next_time_from(schedule_next_run)
return worker_next_run if plan_min_run <= worker_next_run
worker_cron.next_time_from(plan_next_run)
end
private
def schedule_cron
strong_memoize(:schedule_cron) do
Gitlab::Ci::CronParser.new(@schedule.cron, @schedule.cron_timezone)
end
end
def worker_cron
strong_memoize(:worker_cron) do
Gitlab::Ci::CronParser.new(@schedule.worker_cron_expression, Time.zone.name)
end
end
def plan_cron
strong_memoize(:plan_cron) do
daily_limit = @schedule.daily_limit
next unless daily_limit
every_x_minutes = (1.day.in_minutes / daily_limit).to_i
Gitlab::Ci::CronParser.parse_natural("every #{every_x_minutes} minutes", Time.zone.name)
end
end
end
end
end
``` | # frozen_string_literal: true
# rubocop:disable Layout/LineLength
require 'spec_helper'
RSpec.describe Ci::PipelineSchedules::CalculateNextRunService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project, :public, :repository) }
describe '#execute' do
using RSpec::Parameterized::TableSyntax
let(:run_service) do
described_class.new(project).execute(pipeline_schedule,
fallback_method: pipeline_schedule.method(:calculate_next_run_at))
end
let(:pipeline_schedule) { create(:ci_pipeline_schedule, cron: schedule_cron) }
let(:daily_limit_of_144_runs) { 1.day / 10.minutes }
let(:daily_limit_of_24_runs) { 1.day / 1.hour }
before do
allow(Settings).to receive(:cron_jobs) { { 'pipeline_schedule_worker' => { 'cron' => worker_cron } } }
create(:plan_limits, :default_plan, ci_daily_pipeline_schedule_triggers: plan_limit) if plan_limit
end
context "when there is invalid or no plan limits" do
where(:worker_cron, :schedule_cron, :plan_limit, :now, :expected_result) do
'0 1 2 3 *' | '0 1 * * *' | nil | Time.zone.local(2021, 3, 2, 1, 0) | Time.zone.local(2022, 3, 2, 1, 0)
'*/5 * * * *' | '*/1 * * * *' | nil | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 11, 5)
'*/5 * * * *' | '0 * * * *' | nil | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 12, 5)
# 1.day / 2.hours => 12 times a day and it is invalid because there is a minimum for plan limits.
# See: https://docs.gitlab.com/ee/administration/instance_limits.html#limit-the-number-of-pipelines-created-by-a-pipeline-schedule-per-day
'*/5 * * * *' | '0 * * * *' | 1.day / 2.hours | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 12, 5)
end
with_them do
it 'calls fallback method to get next_run_at' do
travel_to(now) do
expect(pipeline_schedule).to receive(:calculate_next_run_at).and_call_original
result = run_service
expect(result).to eq(expected_result)
end
end
end
end
context "when the workers next run matches schedule's earliest run" do
where(:worker_cron, :schedule_cron, :plan_limit, :now, :expected_result) do
'*/5 * * * *' | '0 * * * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 12, 0)
'*/5 * * * *' | '*/5 * * * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 11, 10)
'*/5 * * * *' | '0 1 * * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 27, 1, 0) | Time.zone.local(2021, 5, 28, 1, 0)
'*/5 * * * *' | '0 2 * * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 27, 1, 0) | Time.zone.local(2021, 5, 27, 2, 0)
'*/5 * * * *' | '0 3 * * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 27, 1, 0) | Time.zone.local(2021, 5, 27, 3, 0)
'*/5 * * * *' | '0 1 1 * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 1, 1, 0) | Time.zone.local(2021, 6, 1, 1, 0)
'*/9 * * * *' | '0 1 1 * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 1, 1, 9) | Time.zone.local(2021, 6, 1, 1, 0)
'*/5 * * * *' | '45 21 1 2 *' | daily_limit_of_144_runs | Time.zone.local(2021, 2, 1, 21, 45) | Time.zone.local(2022, 2, 1, 21, 45)
end
with_them do
it 'calculates the next_run_at to be earliest point of match' do
travel_to(now) do
result = run_service
expect(result).to eq(expected_result)
end
end
end
end
context "when next_run_at is restricted by plan limit" do
where(:worker_cron, :schedule_cron, :plan_limit, :now, :expected_result) do
'*/5 * * * *' | '59 14 * * *' | daily_limit_of_24_runs | Time.zone.local(2021, 5, 1, 15, 0) | Time.zone.local(2021, 5, 2, 15, 0)
'*/5 * * * *' | '*/1 * * * *' | daily_limit_of_24_runs | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 12, 0)
'*/5 * * * *' | '*/1 * * * *' | daily_limit_of_144_runs | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 11, 10)
'*/5 * * * *' | '*/1 * * * *' | (1.day / 7.minutes).to_i | Time.zone.local(2021, 5, 27, 11, 0) | Time.zone.local(2021, 5, 27, 11, 10)
end
with_them do
it 'calculates the next_run_at based on next available limit' do
travel_to(now) do
result = run_service
expect(result).to eq(expected_result)
end
end
end
end
context "when next_run_at is restricted by worker's availability" do
where(:worker_cron, :schedule_cron, :plan_limit, :now, :expected_result) do
'0 1 2 3 *' | '0 1 * * *' | daily_limit_of_144_runs | Time.zone.local(2021, 3, 2, 1, 0) | Time.zone.local(2022, 3, 2, 1, 0)
end
with_them do
it 'calculates the next_run_at using worker_cron' do
travel_to(now) do
result = run_service
expect(result).to eq(expected_result)
end
end
end
end
end
end
# rubocop:enable Layout/LineLength
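For reference, the arithmetic behind `#plan_cron` in the class: the daily limit is converted into an "every N minutes" natural-language cron. A worked sketch:
```ruby
daily_limit = 144                                        # 1.day / 10.minutes
every_x_minutes = (1.day.in_minutes / daily_limit).to_i  # => 10
# parsed as: Gitlab::Ci::CronParser.parse_natural("every 10 minutes", Time.zone.name)
```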
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineSchedules
class TakeOwnershipService
def initialize(schedule, user)
@schedule = schedule
@user = user
end
def execute
return forbidden unless allowed?
if schedule.update(owner: user)
ServiceResponse.success(payload: schedule)
else
ServiceResponse.error(message: schedule.errors.full_messages)
end
end
private
attr_reader :schedule, :user
def allowed?
user.can?(:admin_pipeline_schedule, schedule)
end
def forbidden
ServiceResponse.error(message: _('Failed to change the owner'), reason: :access_denied)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineSchedules::TakeOwnershipService, feature_category: :continuous_integration do
let_it_be(:user) { create(:user) }
let_it_be(:owner) { create(:user) }
let_it_be(:reporter) { create(:user) }
let_it_be(:project) { create(:project, :public, :repository) }
let_it_be(:pipeline_schedule) { create(:ci_pipeline_schedule, project: project, owner: owner) }
before_all do
project.add_maintainer(user)
project.add_maintainer(owner)
project.add_reporter(reporter)
end
describe '#execute' do
context 'when user does not have permission' do
subject(:service) { described_class.new(pipeline_schedule, reporter) }
it 'returns ServiceResponse.error' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.error?).to be(true)
expect(result.message).to eq(_('Failed to change the owner'))
end
end
context 'when user has permission' do
subject(:service) { described_class.new(pipeline_schedule, user) }
it 'returns ServiceResponse.success' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.success?).to be(true)
expect(result.payload).to eq(pipeline_schedule)
end
context 'when schedule update fails' do
subject(:service) { described_class.new(pipeline_schedule, owner) }
before do
allow(pipeline_schedule).to receive(:update).and_return(false)
errors = ActiveModel::Errors.new(pipeline_schedule)
errors.add(:base, 'An error occurred')
allow(pipeline_schedule).to receive(:errors).and_return(errors)
end
it 'returns ServiceResponse.error' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.error?).to be(true)
expect(result.message).to eq(['An error occurred'])
end
end
end
end
end
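The forbidden path in the class also sets `reason: :access_denied`, which the spec above leaves unasserted; a sketch of an extra expectation (placed in the unauthorized-user context) that would pin it down:
```ruby
it 'sets the access_denied reason' do
  result = described_class.new(pipeline_schedule, reporter).execute

  expect(result.reason).to eq(:access_denied)
end
```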
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineSchedules
class UpdateService < BaseSaveService
AUTHORIZE = :update_pipeline_schedule
def initialize(schedule, user, params)
@schedule = schedule
@user = user
@project = schedule.project
@params = params
end
def execute
return forbidden_to_save unless allowed_to_save?
super
end
private
def authorize_message
_('The current user is not authorized to update the pipeline schedule')
end
strong_memoize_attr :authorize_message
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineSchedules::UpdateService, feature_category: :continuous_integration do
let_it_be_with_reload(:user) { create(:user) }
let_it_be_with_reload(:project) { create(:project, :public, :repository) }
let_it_be_with_reload(:pipeline_schedule) { create(:ci_pipeline_schedule, project: project, owner: user) }
let_it_be(:reporter) { create(:user) }
let_it_be(:project_owner) { create(:user) }
let_it_be(:pipeline_schedule_variable) do
create(:ci_pipeline_schedule_variable,
key: 'foo', value: 'foovalue', pipeline_schedule: pipeline_schedule)
end
before_all do
project.add_maintainer(user)
project.add_owner(project_owner)
project.add_reporter(reporter)
pipeline_schedule.reload
end
describe "execute" do
context 'when user does not have permission' do
subject(:service) { described_class.new(pipeline_schedule, reporter, {}) }
it 'returns ServiceResponse.error' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.error?).to be(true)
error_message = _('The current user is not authorized to update the pipeline schedule')
expect(result.message).to match_array([error_message])
expect(pipeline_schedule.errors).to match_array([error_message])
end
end
context 'when user has permission' do
let(:params) do
{
description: 'updated_desc',
ref: 'patch-x',
active: false,
cron: '*/1 * * * *',
variables_attributes: [
{ id: pipeline_schedule_variable.id, key: 'bar', secret_value: 'barvalue' }
]
}
end
subject(:service) { described_class.new(pipeline_schedule, user, params) }
it 'updates database values with passed params' do
expect do
service.execute
pipeline_schedule.reload
end.to change { pipeline_schedule.description }.from('pipeline schedule').to('updated_desc')
.and change { pipeline_schedule.ref }.from('master').to('patch-x')
.and change { pipeline_schedule.active }.from(true).to(false)
.and change { pipeline_schedule.cron }.from('0 1 * * *').to('*/1 * * * *')
.and change { pipeline_schedule.variables.last.key }.from('foo').to('bar')
.and change { pipeline_schedule.variables.last.value }.from('foovalue').to('barvalue')
end
context 'when the new branch is protected', :request_store do
let(:maintainer_access) { :no_one_can_merge }
before do
create(:protected_branch, :no_one_can_push, maintainer_access, name: 'patch-x', project: project)
end
after do
ProtectedBranches::CacheService.new(project).refresh
end
context 'when called by someone other than the schedule owner who can update the ref' do
let(:maintainer_access) { :maintainers_can_merge }
subject(:service) { described_class.new(pipeline_schedule, project_owner, params) }
it 'does not update the schedule' do
expect do
service.execute
pipeline_schedule.reload
end.not_to change { pipeline_schedule.description }
end
end
context 'when called by the schedule owner' do
it 'does not update the schedule' do
expect do
service.execute
pipeline_schedule.reload
end.not_to change { pipeline_schedule.description }
end
context 'when the owner can update the ref' do
let(:maintainer_access) { :maintainers_can_merge }
it 'updates the schedule' do
expect { service.execute }.to change { pipeline_schedule.description }
end
end
end
end
context 'when creating a variable' do
let(:params) do
{
variables_attributes: [
{ key: 'ABC', secret_value: 'ABC123' }
]
}
end
it 'creates the new variable' do
expect { service.execute }.to change { Ci::PipelineScheduleVariable.count }.by(1)
expect(pipeline_schedule.variables.last.key).to eq('ABC')
expect(pipeline_schedule.variables.last.value).to eq('ABC123')
end
end
context 'when deleting a variable' do
let(:params) do
{
variables_attributes: [
{
id: pipeline_schedule_variable.id,
_destroy: true
}
]
}
end
it 'deletes the existing variable' do
expect { service.execute }.to change { Ci::PipelineScheduleVariable.count }.by(-1)
end
end
it 'returns ServiceResponse.success' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.success?).to be(true)
expect(result.payload.description).to eq('updated_desc')
end
context 'when schedule update fails' do
subject(:service) { described_class.new(pipeline_schedule, user, {}) }
before do
allow(pipeline_schedule).to receive(:save).and_return(false)
errors = ActiveModel::Errors.new(pipeline_schedule)
errors.add(:base, 'An error occurred')
allow(pipeline_schedule).to receive(:errors).and_return(errors)
end
it 'returns ServiceResponse.error' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.error?).to be(true)
expect(result.message).to match_array(['An error occurred'])
end
end
end
it_behaves_like 'pipeline schedules checking variables permission' do
subject(:service) { described_class.new(pipeline_schedule, user, params) }
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineSchedules
class CreateService < BaseSaveService
AUTHORIZE = :create_pipeline_schedule
def initialize(project, user, params)
@schedule = project.pipeline_schedules.new
@user = user
@project = project
@params = params.merge(owner: user)
end
private
def authorize_message
_('The current user is not authorized to create the pipeline schedule')
end
strong_memoize_attr :authorize_message
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineSchedules::CreateService, feature_category: :continuous_integration do
let_it_be(:reporter) { create(:user) }
let_it_be_with_reload(:user) { create(:user) }
let_it_be_with_reload(:project) { create(:project, :public, :repository) }
subject(:service) { described_class.new(project, user, params) }
before_all do
project.add_maintainer(user)
project.add_reporter(reporter)
end
describe "execute" do
context 'when user does not have permission' do
subject(:service) { described_class.new(project, reporter, {}) }
it 'returns ServiceResponse.error' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.error?).to be(true)
error_message = _('The current user is not authorized to create the pipeline schedule')
expect(result.message).to match_array([error_message])
expect(result.payload.errors).to match_array([error_message])
end
end
context 'when user has permission' do
let(:params) do
{
description: 'desc',
ref: 'patch-x',
active: false,
cron: '*/1 * * * *',
cron_timezone: 'UTC'
}
end
subject(:service) { described_class.new(project, user, params) }
it 'saves values with passed params' do
result = service.execute
expect(result.payload).to have_attributes(
description: 'desc',
ref: 'patch-x',
active: false,
cron: '*/1 * * * *',
cron_timezone: 'UTC'
)
end
it 'returns ServiceResponse.success' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.success?).to be(true)
end
context 'when schedule save fails' do
subject(:service) { described_class.new(project, user, {}) }
before do
errors = ActiveModel::Errors.new(project)
errors.add(:base, 'An error occurred')
allow_next_instance_of(Ci::PipelineSchedule) do |instance|
allow(instance).to receive(:save).and_return(false)
allow(instance).to receive(:errors).and_return(errors)
end
end
it 'returns ServiceResponse.error' do
result = service.execute
expect(result).to be_a(ServiceResponse)
expect(result.error?).to be(true)
expect(result.message).to match_array(['An error occurred'])
end
end
end
it_behaves_like 'pipeline schedules checking variables permission'
end
end
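Worth noting from the class above: `#initialize` merges `owner: user` into the params, so the owner is always the calling user regardless of what the caller passes. A sketch of an assertion for that invariant, assuming it sits inside the permitted-user context:
```ruby
it 'assigns the current user as the schedule owner' do
  result = described_class.new(project, user, params).execute

  expect(result.payload.owner).to eq(user)
end
```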
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineCreation
class CancelRedundantPipelinesService
include Gitlab::Utils::StrongMemoize
BATCH_SIZE = 25
PAGE_SIZE = 500
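# PAGE_SIZE bounds each offset-paginated scan of candidate pipeline ids in #paginator.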
def initialize(pipeline)
@pipeline = pipeline
@project = @pipeline.project
end
# rubocop: disable CodeReuse/ActiveRecord
def execute
return if service_disabled?
return if pipeline.parent_pipeline? # skip if child pipeline
return unless project.auto_cancel_pending_pipelines?
paginator.each do |ids|
pipelines = parent_and_child_pipelines(ids)
Gitlab::OptimisticLocking.retry_lock(pipelines, name: 'cancel_pending_pipelines') do |cancelables|
auto_cancel_interruptible_pipelines(cancelables.ids)
end
end
end
private
attr_reader :pipeline, :project
def paginator
page = 1
Enumerator.new do |yielder|
loop do
# leverage the index_ci_pipelines_on_project_id_and_status_and_created_at index
records = project.all_pipelines
.created_after(pipelines_created_after)
.order(:status, :created_at)
.page(page) # use offset pagination because there is no other way to loop over the data
.per(PAGE_SIZE)
.pluck(:id)
raise StopIteration if records.empty?
yielder << records
page += 1
end
end
end
def parent_auto_cancelable_pipelines(ids)
scope = project.all_pipelines
.created_after(pipelines_created_after)
.for_ref(pipeline.ref)
.where_not_sha(project.commit(pipeline.ref).try(:id))
.where("created_at < ?", pipeline.created_at)
.for_status(CommitStatus::AVAILABLE_STATUSES) # Force usage of project_id_and_status_and_created_at_index
.ci_sources
scope.id_in(ids)
end
def parent_and_child_pipelines(ids)
Ci::Pipeline.object_hierarchy(parent_auto_cancelable_pipelines(ids), project_condition: :same)
.base_and_descendants
.alive_or_scheduled
end
# rubocop: enable CodeReuse/ActiveRecord
def auto_cancel_interruptible_pipelines(pipeline_ids)
::Ci::Pipeline
.id_in(pipeline_ids)
.with_only_interruptible_builds
.each do |cancelable_pipeline|
Gitlab::AppLogger.info(
class: self.class.name,
message: "Pipeline #{pipeline.id} auto-canceling pipeline #{cancelable_pipeline.id}",
canceled_pipeline_id: cancelable_pipeline.id,
canceled_by_pipeline_id: pipeline.id,
canceled_by_pipeline_source: pipeline.source
)
# cascade_to_children not needed because we iterate through descendants here
::Ci::CancelPipelineService.new(
pipeline: cancelable_pipeline,
current_user: nil,
auto_canceled_by_pipeline: pipeline,
cascade_to_children: false
).force_execute
end
end
def pipelines_created_after
3.days.ago
end
# Finding the pipelines to cancel is an expensive task that is not well
# covered by indexes for all project use-cases and sometimes it might
# harm other services. See https://gitlab.com/gitlab-com/gl-infra/production/-/issues/14758
# This feature flag is in place to disable this feature for rogue projects.
#
def service_disabled?
Feature.enabled?(:disable_cancel_redundant_pipelines_service, project, type: :ops)
end
end
end
end
``` | # frozen_string_literal: true
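The `paginator` above is an Enumerator wrapped around plain offset pagination. A minimal, self-contained sketch of the same pattern, with an array standing in for the ActiveRecord scope:
```ruby
# Offset pagination via a lazy Enumerator, mirroring #paginator above.
# DATA stands in for project.all_pipelines; each yielded batch is one page of ids.
DATA = (1..7).to_a

def paginate(page_size)
  page = 1
  Enumerator.new do |yielder|
    loop do
      records = DATA.slice((page - 1) * page_size, page_size) || []
      raise StopIteration if records.empty? # Kernel#loop swallows StopIteration
      yielder << records
      page += 1
    end
  end
end

paginate(3).each { |batch| p batch } # prints [1, 2, 3], [4, 5, 6], [7]
```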
require 'spec_helper'
RSpec.describe Ci::PipelineCreation::CancelRedundantPipelinesService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }
let(:prev_pipeline) { create(:ci_pipeline, project: project) }
let!(:new_commit) { create(:commit, project: project) }
let(:pipeline) { create(:ci_pipeline, project: project, sha: new_commit.sha) }
let(:service) { described_class.new(pipeline) }
before do
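# prev_pipeline: one running interruptible build, one finished (success)
# interruptible build, and one non-interruptible build that has not started;
# the new pipeline gets a single interruptible (pending) build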
create(:ci_build, :interruptible, :running, pipeline: prev_pipeline)
create(:ci_build, :interruptible, :success, pipeline: prev_pipeline)
create(:ci_build, :created, pipeline: prev_pipeline)
create(:ci_build, :interruptible, pipeline: pipeline)
end
shared_examples 'time limits pipeline cancellation' do
context 'with old pipelines' do
let(:old_pipeline) { create(:ci_pipeline, project: project, created_at: 5.days.ago) }
before do
create(:ci_build, :interruptible, :pending, pipeline: old_pipeline)
end
it 'ignores old pipelines' do
execute
expect(build_statuses(prev_pipeline)).to contain_exactly('canceled', 'success', 'canceled')
expect(build_statuses(pipeline)).to contain_exactly('pending')
expect(build_statuses(old_pipeline)).to contain_exactly('pending')
end
end
end
describe '#execute' do
subject(:execute) { service.execute }
context 'when build statuses are set up correctly' do
it 'has builds of all statuses' do
expect(build_statuses(prev_pipeline)).to contain_exactly('running', 'success', 'created')
expect(build_statuses(pipeline)).to contain_exactly('pending')
end
end
context 'when auto-cancel is enabled' do
before do
project.update!(auto_cancel_pending_pipelines: 'enabled')
end
it 'cancels only previous interruptible builds' do
execute
expect(build_statuses(prev_pipeline)).to contain_exactly('canceled', 'success', 'canceled')
expect(build_statuses(pipeline)).to contain_exactly('pending')
end
it 'logs canceled pipelines' do
allow(Gitlab::AppLogger).to receive(:info)
execute
expect(Gitlab::AppLogger).to have_received(:info).with(
class: described_class.name,
message: "Pipeline #{pipeline.id} auto-canceling pipeline #{prev_pipeline.id}",
canceled_pipeline_id: prev_pipeline.id,
canceled_by_pipeline_id: pipeline.id,
canceled_by_pipeline_source: pipeline.source
)
end
context 'when the previous pipeline has a child pipeline' do
let(:child_pipeline) { create(:ci_pipeline, child_of: prev_pipeline) }
context 'with another nested child pipeline' do
let(:another_child_pipeline) { create(:ci_pipeline, child_of: child_pipeline) }
before do
create(:ci_build, :interruptible, :running, pipeline: another_child_pipeline)
create(:ci_build, :interruptible, :running, pipeline: another_child_pipeline)
end
it 'cancels all nested child pipeline builds' do
expect(build_statuses(another_child_pipeline)).to contain_exactly('running', 'running')
execute
expect(build_statuses(another_child_pipeline)).to contain_exactly('canceled', 'canceled')
end
end
context 'when started after pipeline was finished' do
before do
create(:ci_build, :interruptible, :running, pipeline: child_pipeline)
prev_pipeline.update!(status: "success")
end
it 'cancels child pipeline builds' do
expect(build_statuses(child_pipeline)).to contain_exactly('running')
execute
expect(build_statuses(child_pipeline)).to contain_exactly('canceled')
end
end
context 'when the child pipeline has interruptible running jobs' do
before do
create(:ci_build, :interruptible, :running, pipeline: child_pipeline)
create(:ci_build, :interruptible, :running, pipeline: child_pipeline)
end
it 'cancels all child pipeline builds' do
expect(build_statuses(child_pipeline)).to contain_exactly('running', 'running')
execute
expect(build_statuses(child_pipeline)).to contain_exactly('canceled', 'canceled')
end
context 'when the child pipeline includes completed interruptible jobs' do
before do
create(:ci_build, :interruptible, :failed, pipeline: child_pipeline)
create(:ci_build, :interruptible, :success, pipeline: child_pipeline)
end
it 'cancels all child pipeline builds with a cancelable_status' do
expect(build_statuses(child_pipeline)).to contain_exactly('running', 'running', 'failed', 'success')
execute
expect(build_statuses(child_pipeline)).to contain_exactly('canceled', 'canceled', 'failed', 'success')
end
end
end
context 'when the child pipeline has started non-interruptible job' do
before do
create(:ci_build, :interruptible, :running, pipeline: child_pipeline)
# non-interruptible started
create(:ci_build, :success, pipeline: child_pipeline)
end
it 'does not cancel any child pipeline builds' do
expect(build_statuses(child_pipeline)).to contain_exactly('running', 'success')
execute
expect(build_statuses(child_pipeline)).to contain_exactly('running', 'success')
end
end
context 'when the child pipeline has non-interruptible non-started job' do
before do
create(:ci_build, :interruptible, :running, pipeline: child_pipeline)
end
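# Every available status that has not yet started (e.g. created, pending, manual, scheduled)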
not_started_statuses = Ci::HasStatus::AVAILABLE_STATUSES - Ci::HasStatus::STARTED_STATUSES
context 'when the jobs are cancelable' do
cancelable_not_started_statuses =
Set.new(not_started_statuses).intersection(Ci::HasStatus::CANCELABLE_STATUSES)
cancelable_not_started_statuses.each do |status|
it "cancels all child pipeline builds when build status #{status} included" do
# non-interruptible but non-started
create(:ci_build, status.to_sym, pipeline: child_pipeline)
expect(build_statuses(child_pipeline)).to contain_exactly('running', status)
execute
expect(build_statuses(child_pipeline)).to contain_exactly('canceled', 'canceled')
end
end
end
context 'when the jobs are not cancelable' do
not_cancelable_not_started_statuses = not_started_statuses - Ci::HasStatus::CANCELABLE_STATUSES
not_cancelable_not_started_statuses.each do |status|
it "does not cancel child pipeline builds when build status #{status} included" do
# non-interruptible but non-started
create(:ci_build, status.to_sym, pipeline: child_pipeline)
expect(build_statuses(child_pipeline)).to contain_exactly('running', status)
execute
expect(build_statuses(child_pipeline)).to contain_exactly('canceled', status)
end
end
end
end
end
context 'when the pipeline is a child pipeline' do
let!(:parent_pipeline) { create(:ci_pipeline, project: project, sha: new_commit.sha) }
let(:pipeline) { create(:ci_pipeline, child_of: parent_pipeline) }
before do
create(:ci_build, :interruptible, :running, pipeline: parent_pipeline)
create(:ci_build, :interruptible, :running, pipeline: parent_pipeline)
end
it 'does not cancel any builds' do
expect(build_statuses(prev_pipeline)).to contain_exactly('running', 'success', 'created')
expect(build_statuses(parent_pipeline)).to contain_exactly('running', 'running')
execute
expect(build_statuses(prev_pipeline)).to contain_exactly('running', 'success', 'created')
expect(build_statuses(parent_pipeline)).to contain_exactly('running', 'running')
end
end
context 'when the previous pipeline source is webide' do
let(:prev_pipeline) { create(:ci_pipeline, :webide, project: project) }
it 'does not cancel builds of the previous pipeline' do
execute
expect(build_statuses(prev_pipeline)).to contain_exactly('created', 'running', 'success')
expect(build_statuses(pipeline)).to contain_exactly('pending')
end
end
it 'does not cancel future pipelines' do
expect(prev_pipeline.id).to be < pipeline.id
expect(build_statuses(pipeline)).to contain_exactly('pending')
expect(build_statuses(prev_pipeline)).to contain_exactly('running', 'success', 'created')
described_class.new(prev_pipeline).execute
expect(build_statuses(pipeline.reload)).to contain_exactly('pending')
end
it_behaves_like 'time limits pipeline cancellation'
end
context 'when auto-cancel is disabled' do
before do
project.update!(auto_cancel_pending_pipelines: 'disabled')
end
it 'does not cancel any build' do
subject
expect(build_statuses(prev_pipeline)).to contain_exactly('running', 'success', 'created')
expect(build_statuses(pipeline)).to contain_exactly('pending')
end
end
context 'when the disable_cancel_redundant_pipelines_service FF is enabled' do
before do
stub_feature_flags(disable_cancel_redundant_pipelines_service: true)
end
it 'does not cancel any build' do
subject
expect(build_statuses(prev_pipeline)).to contain_exactly('running', 'success', 'created')
expect(build_statuses(pipeline)).to contain_exactly('pending')
end
end
end
private
def build_statuses(pipeline)
pipeline.builds.pluck(:status)
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module PipelineCreation
class StartPipelineService
attr_reader :pipeline
def initialize(pipeline)
@pipeline = pipeline
end
def execute
##
# Create a persistent ref for the pipeline.
# The pipeline ref is fetched in the jobs and deleted when the pipeline transitions to a finished state.
pipeline.ensure_persistent_ref
Ci::ProcessPipelineService.new(pipeline).execute
end
end
end
end
::Ci::PipelineCreation::StartPipelineService.prepend_mod_with('Ci::PipelineCreation::StartPipelineService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineCreation::StartPipelineService, feature_category: :continuous_integration do
let(:pipeline) { build(:ci_pipeline) }
subject(:service) { described_class.new(pipeline) }
describe '#execute' do
it 'calls the pipeline process service' do
expect(Ci::ProcessPipelineService)
.to receive(:new)
.with(pipeline)
.and_return(double('service', execute: true))
service.execute
end
it 'creates pipeline ref' do
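# Assumes Ci::Pipeline#ensure_persistent_ref delegates to persistent_ref.create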
expect(pipeline.persistent_ref).to receive(:create).once
service.execute
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Ci
module Deployments
class DestroyService < BaseService
def execute(deployment)
raise Gitlab::Access::AccessDeniedError unless can?(current_user, :destroy_deployment, deployment)
return ServiceResponse.error(message: 'Cannot destroy running deployment') if deployment&.running?
return ServiceResponse.error(message: 'Deployment currently deployed to environment') if deployment&.last?
project.destroy_deployment_by_id(deployment)
ServiceResponse.success(message: 'Deployment destroyed')
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Ci::Deployments::DestroyService, feature_category: :continuous_integration do
let_it_be(:project) { create(:project, :repository) }
let(:environment) { create(:environment, project: project) }
let(:commits) { project.repository.commits(nil, { limit: 3 }) }
let!(:deploy) do
create(
:deployment,
:success,
project: project,
environment: environment,
deployable: nil,
sha: commits[2].sha
)
end
let!(:running_deploy) do
create(
:deployment,
:running,
project: project,
environment: environment,
deployable: nil,
sha: commits[1].sha
)
end
let!(:old_deploy) do
create(
:deployment,
:success,
project: project,
environment: environment,
deployable: nil,
sha: commits[0].sha,
finished_at: 1.year.ago
)
end
let(:user) { project.first_owner }
subject { described_class.new(project, user) }
context 'when deleting a deployment' do
it 'delete is accepted for old deployment' do
expect(subject.execute(old_deploy)).to be_success
end
it 'does not delete a running deployment' do
response = subject.execute(running_deploy)
expect(response).to be_an_error
expect(response.message).to eq("Cannot destroy running deployment")
end
it 'does not delete the last deployment' do
response = subject.execute(deploy)
expect(response).to be_an_error
expect(response.message).to eq("Deployment currently deployed to environment")
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class ResetAutoStopService < ::BaseService
def execute(environment)
return error(_('Failed to cancel auto stop because you do not have permission to update the environment.')) unless can_update_environment?(environment)
return error(_('Failed to cancel auto stop because the environment is not set as auto stop.')) unless environment.auto_stop_at?
if environment.reset_auto_stop
success
else
error(_('Failed to cancel auto stop because failed to update the environment.'))
end
end
private
def can_update_environment?(environment)
can?(current_user, :update_environment, environment)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Environments::ResetAutoStopService, feature_category: :continuous_delivery do
let_it_be(:project) { create(:project) }
let_it_be(:developer) { create(:user).tap { |user| project.add_developer(user) } }
let_it_be(:reporter) { create(:user).tap { |user| project.add_reporter(user) } }
let(:user) { developer }
let(:service) { described_class.new(project, user) }
describe '#execute' do
subject { service.execute(environment) }
context 'when environment will be stopped automatically' do
let(:environment) { create(:environment, :will_auto_stop, project: project) }
it 'resets auto stop' do
expect(environment).to receive(:reset_auto_stop).and_call_original
expect(subject[:status]).to eq(:success)
end
context 'when failed to reset auto stop' do
before do
expect(environment).to receive(:reset_auto_stop) { false }
end
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Failed to cancel auto stop because failed to update the environment.')
end
end
context 'when user is reporter' do
let(:user) { reporter }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Failed to cancel auto stop because you do not have permission to update the environment.')
end
end
end
context 'when environment will not be stopped automatically' do
let(:environment) { create(:environment, project: project) }
it 'returns error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq('Failed to cancel auto stop because the environment is not set as auto stop.')
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class ScheduleToDeleteReviewAppsService < ::BaseService
include ::Gitlab::ExclusiveLeaseHelpers
EXCLUSIVE_LOCK_KEY_BASE = 'environments:delete_review_apps:lock'
LOCK_TIMEOUT = 2.minutes
def execute
if validation_error = validate
return validation_error
end
mark_deletable_environments
end
private
def key
"#{EXCLUSIVE_LOCK_KEY_BASE}:#{project.id}"
end
def dry_run?
return true if params[:dry_run].nil?
params[:dry_run]
end
def validate
return if can?(current_user, :destroy_environment, project)
Result.new(error_message: "You do not have permission to destroy environments in this project", status: :unauthorized)
end
def mark_deletable_environments
in_lock(key, ttl: LOCK_TIMEOUT, retries: 1) do
unsafe_mark_deletable_environments
end
rescue FailedToObtainLockError
Result.new(error_message: "Another process is already processing a delete request. Please retry later.", status: :conflict)
end
def unsafe_mark_deletable_environments
result = Result.new
environments = project.environments
.not_scheduled_for_deletion
.stopped_review_apps(params[:before], params[:limit])
# Check if the actor has write permission to a potentially-protected environment.
deletable, failed = *environments.partition { |env| current_user.can?(:destroy_environment, env) }
if deletable.any? && failed.empty?
mark_for_deletion(deletable) unless dry_run?
result.set_status(:ok)
result.set_scheduled_entries(deletable)
else
result.set_status(
:bad_request,
error_message: "No environments found for scheduled deletion. Either your query did not match any environments (default parameters match environments that are 30 days or older), or you have insufficient permissions to delete matching environments."
)
result.set_unprocessable_entries(failed)
end
result
end
def mark_for_deletion(deletable_environments)
Environment.id_in(deletable_environments).schedule_to_delete
end
class Result
attr_accessor :scheduled_entries, :unprocessable_entries, :error_message, :status
def initialize(scheduled_entries: [], unprocessable_entries: [], error_message: nil, status: nil)
self.scheduled_entries = scheduled_entries
self.unprocessable_entries = unprocessable_entries
self.error_message = error_message
self.status = status
end
def success?
status == :ok
end
def set_status(status, error_message: nil)
self.status = status
self.error_message = error_message
end
def set_scheduled_entries(entries)
self.scheduled_entries = entries
end
def set_unprocessable_entries(entries)
self.unprocessable_entries = entries
end
end
end
end
``` | # frozen_string_literal: true
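The permission split in `unsafe_mark_deletable_environments` is plain `Array#partition`. A quick illustration of the idiom:
```ruby
# partition splits a collection on a predicate, the same way the service
# separates deletable environments from unprocessable ones.
deletable, failed = [1, 2, 3, 4].partition(&:even?)
deletable # => [2, 4]
failed    # => [1, 3]
```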
require "spec_helper"
RSpec.describe Environments::ScheduleToDeleteReviewAppsService, feature_category: :continuous_delivery do
include ExclusiveLeaseHelpers
let_it_be(:maintainer) { create(:user) }
let_it_be(:developer) { create(:user) }
let_it_be(:reporter) { create(:user) }
let_it_be(:project) { create(:project, :private, :repository, namespace: maintainer.namespace) }
let(:service) { described_class.new(project, current_user, before: 30.days.ago, dry_run: dry_run) }
let(:dry_run) { false }
let(:current_user) { maintainer }
before do
project.add_maintainer(maintainer)
project.add_developer(developer)
project.add_reporter(reporter)
end
describe "#execute" do
subject { service.execute }
shared_examples "can schedule for deletion" do
let!(:old_stopped_review_env) { create(:environment, :with_review_app, :stopped, created_at: 31.days.ago, project: project) }
let!(:new_stopped_review_env) { create(:environment, :with_review_app, :stopped, project: project) }
let!(:old_active_review_env) { create(:environment, :with_review_app, :available, created_at: 31.days.ago, project: project) }
let!(:old_stopped_other_env) { create(:environment, :stopped, created_at: 31.days.ago, project: project) }
let!(:new_stopped_other_env) { create(:environment, :stopped, project: project) }
let!(:old_active_other_env) { create(:environment, :available, created_at: 31.days.ago, project: project) }
let!(:already_deleting_env) { create(:environment, :with_review_app, :stopped, created_at: 31.days.ago, project: project, auto_delete_at: 1.day.from_now) }
let(:already_deleting_time) { already_deleting_env.reload.auto_delete_at }
context "live run" do
let(:dry_run) { false }
around do |example|
freeze_time { example.run }
end
it "marks the correct environment as scheduled_entries" do
expect(subject.success?).to be_truthy
expect(subject.scheduled_entries).to contain_exactly(old_stopped_review_env)
expect(subject.unprocessable_entries).to be_empty
old_stopped_review_env.reload
new_stopped_review_env.reload
old_active_review_env.reload
old_stopped_other_env.reload
new_stopped_other_env.reload
old_active_other_env.reload
already_deleting_env.reload
expect(old_stopped_review_env.auto_delete_at).to eq(1.week.from_now)
expect(new_stopped_review_env.auto_delete_at).to be_nil
expect(old_active_review_env.auto_delete_at).to be_nil
expect(old_stopped_other_env.auto_delete_at).to be_nil
expect(new_stopped_other_env.auto_delete_at).to be_nil
expect(old_active_other_env.auto_delete_at).to be_nil
expect(already_deleting_env.auto_delete_at).to eq(already_deleting_time)
end
end
context "dry run" do
let(:dry_run) { true }
it "returns the same but doesn't update the record" do
expect(subject.success?).to be_truthy
expect(subject.scheduled_entries).to contain_exactly(old_stopped_review_env)
expect(subject.unprocessable_entries).to be_empty
old_stopped_review_env.reload
new_stopped_review_env.reload
old_active_review_env.reload
old_stopped_other_env.reload
new_stopped_other_env.reload
old_active_other_env.reload
already_deleting_env.reload
expect(old_stopped_review_env.auto_delete_at).to be_nil
expect(new_stopped_review_env.auto_delete_at).to be_nil
expect(old_active_review_env.auto_delete_at).to be_nil
expect(old_stopped_other_env.auto_delete_at).to be_nil
expect(new_stopped_other_env.auto_delete_at).to be_nil
expect(old_active_other_env.auto_delete_at).to be_nil
expect(already_deleting_env.auto_delete_at).to eq(already_deleting_time)
end
end
describe "execution in parallel" do
before do
stub_exclusive_lease_taken(service.send(:key))
end
it "does not execute unsafe_mark_scheduled_entries_environments" do
expect(service).not_to receive(:unsafe_mark_scheduled_entries_environments)
expect(subject.success?).to be_falsey
expect(subject.status).to eq(:conflict)
end
end
end
context "as a maintainer" do
let(:current_user) { maintainer }
it_behaves_like "can schedule for deletion"
end
context "as a developer" do
let(:current_user) { developer }
it_behaves_like "can schedule for deletion"
end
context "as a reporter" do
let(:current_user) { reporter }
it "fails to delete environments" do
old_stopped_review_env = create(:environment, :with_review_app, :stopped, created_at: 31.days.ago, project: project)
expect(subject.success?).to be_falsey
# Both of these should be empty as we fail before testing them
expect(subject.scheduled_entries).to be_empty
expect(subject.unprocessable_entries).to be_empty
old_stopped_review_env.reload
expect(old_stopped_review_env.auto_delete_at).to be_nil
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
# This class creates an environment record for a pipeline job.
class CreateForJobService
def execute(job)
return unless job.is_a?(::Ci::Processable) && job.has_environment_keyword?
environment = to_resource(job)
if environment.persisted?
job.persisted_environment = environment
job.assign_attributes(metadata_attributes: { expanded_environment_name: environment.name })
else
job.assign_attributes(status: :failed, failure_reason: :environment_creation_failure)
end
environment
end
private
# rubocop: disable Performance/ActiveRecordSubtransactionMethods
def to_resource(job)
job.project.environments.safe_find_or_create_by(name: job.expanded_environment_name) do |environment|
# Initialize the attributes at creation
environment.auto_stop_in = expanded_auto_stop_in(job)
environment.tier = job.environment_tier_from_options
environment.merge_request = job.pipeline.merge_request
end
end
# rubocop: enable Performance/ActiveRecordSubtransactionMethods
def expanded_auto_stop_in(job)
return unless job.environment_auto_stop_in
ExpandVariables.expand(job.environment_auto_stop_in, -> { job.simple_variables.sort_and_expand_all })
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Environments::CreateForJobService, feature_category: :continuous_delivery do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let(:service) { described_class.new }
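# 'create environment for job' is assumed to be a shared example group defined
# under spec/support; it exercises the creation flow for the given factory_type.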
it_behaves_like 'create environment for job' do
let(:factory_type) { :ci_build }
end
it_behaves_like 'create environment for job' do
let(:factory_type) { :ci_bridge }
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class AutoStopService
include ::Gitlab::ExclusiveLeaseHelpers
include ::Gitlab::LoopHelpers
BATCH_SIZE = 100
LOOP_TIMEOUT = 45.minutes
LOOP_LIMIT = 1000
EXCLUSIVE_LOCK_KEY = 'environments:auto_stop:lock'
LOCK_TIMEOUT = 50.minutes
##
# Stop expired environments on GitLab instance
#
# This auto stop process cannot run for more than 45 minutes. This is for
# preventing multiple `AutoStopCronWorker` CRON jobs run concurrently,
# which is scheduled at every hour.
def execute
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
stop_in_batch
end
end
end
private
def stop_in_batch
environments = Environment.preload_project.select(:id, :project_id).auto_stoppable(BATCH_SIZE)
return false if environments.empty?
Environments::AutoStopWorker.bulk_perform_async_with_contexts(
environments,
arguments_proc: -> (environment) { environment.id },
context_proc: -> (environment) { { project: environment.project } }
)
true
end
end
end
``` | # frozen_string_literal: true
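`loop_until` comes from `Gitlab::LoopHelpers`. A rough reimplementation of the contract the specs below rely on, inferred from the expectations rather than copied from upstream:
```ruby
# Sketch: run the block until it returns false, the iteration limit is
# exhausted, or the timeout elapses; false signals the loop was cut short.
def loop_until(timeout: nil, limit: 1_000_000)
  start = Time.now
  limit.times do
    return false if timeout && (Time.now - start) > timeout
    return true unless yield # a falsey block result ends the loop normally
  end
  false # limit exhausted
end
```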
require 'spec_helper'
RSpec.describe Environments::AutoStopService, :clean_gitlab_redis_shared_state, :sidekiq_inline,
feature_category: :continuous_delivery do
include CreateEnvironmentsHelpers
include ExclusiveLeaseHelpers
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let(:service) { described_class.new }
before_all do
project.add_developer(user)
end
describe '#execute' do
subject { service.execute }
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let(:environments) { Environment.all }
before_all do
project.add_developer(user)
project.repository.add_branch(user, 'review/feature-1', 'master')
project.repository.add_branch(user, 'review/feature-2', 'master')
end
before do
create_review_app(user, project, 'review/feature-1')
create_review_app(user, project, 'review/feature-2')
end
it 'stops environments and play stop jobs' do
expect { subject }
.to change { Environment.all.map(&:state).uniq }
.from(['available']).to(['stopping'])
expect(Ci::Build.where(name: 'stop_review_app').map(&:status).uniq).to eq(['pending'])
end
it 'schedules stop processes in bulk' do
args = [[Environment.find_by_name('review/feature-1').id], [Environment.find_by_name('review/feature-2').id]]
expect(Environments::AutoStopWorker)
.to receive(:bulk_perform_async).with(args).once.and_call_original
subject
end
context 'when the other sidekiq worker has already been running' do
before do
stub_exclusive_lease_taken(described_class::EXCLUSIVE_LOCK_KEY)
end
it 'does not execute stop_in_batch' do
expect_next_instance_of(described_class) do |service|
expect(service).not_to receive(:stop_in_batch)
end
expect { subject }.to raise_error(Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError)
end
end
context 'when loop reached timeout' do
before do
stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds)
stub_const("#{described_class}::LOOP_LIMIT", 100_000)
allow_next_instance_of(described_class) do |service|
allow(service).to receive(:stop_in_batch) { true }
end
end
it 'returns false and does not continue the process' do
is_expected.to eq(false)
end
end
context 'when loop reached loop limit' do
before do
stub_const("#{described_class}::LOOP_LIMIT", 1)
stub_const("#{described_class}::BATCH_SIZE", 1)
end
it 'stops only one available environment' do
expect { subject }.to change { Environment.available.count }.by(-1)
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class StopService < BaseService
attr_reader :ref
def execute(environment)
unless can?(current_user, :stop_environment, environment)
return ServiceResponse.error(
message: 'Unauthorized to stop the environment',
payload: { environment: environment }
)
end
if params[:force]
environment.stop_complete!
else
environment.stop_with_actions!(current_user)
end
unless environment.saved_change_to_attribute?(:state)
return ServiceResponse.error(
message: 'Attempted to stop the environment but failed to change the status',
payload: { environment: environment }
)
end
ServiceResponse.success(payload: { environment: environment })
end
def execute_for_branch(branch_name)
@ref = branch_name
return unless @ref.present?
environments.each { |environment| execute(environment) }
end
def execute_for_merge_request_pipeline(merge_request)
return unless merge_request.actual_head_pipeline&.merge_request?
created_environments = merge_request.created_environments
if created_environments.any?
# This log message can be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/372965
Gitlab::AppJsonLogger.info(message: 'Running new dynamic environment stop logic', project_id: project.id)
created_environments.each { |env| execute(env) }
else
environments_in_head_pipeline = merge_request.environments_in_head_pipeline(deployment_status: :success)
environments_in_head_pipeline.each { |env| execute(env) }
if environments_in_head_pipeline.any?
# If we don't see a message often, we'd be able to remove this path. (or likely in GitLab 16.0)
# See https://gitlab.com/gitlab-org/gitlab/-/issues/372965
Gitlab::AppJsonLogger.info(message: 'Running legacy dynamic environment stop logic', project_id: project.id)
end
end
end
private
def environments
@environments ||= Environments::EnvironmentsByDeploymentsFinder
.new(project, current_user, ref: @ref, recently_updated: true)
.execute
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Environments::StopService, feature_category: :continuous_delivery do
include CreateEnvironmentsHelpers
let(:project) { create(:project, :private, :repository) }
let(:user) { create(:user) }
let(:service) { described_class.new(project, user) }
describe '#execute' do
subject { service.execute(environment) }
let_it_be(:project) { create(:project, :private, :repository) }
let_it_be(:developer) { create(:user).tap { |u| project.add_developer(u) } }
let_it_be(:reporter) { create(:user).tap { |u| project.add_reporter(u) } }
let(:user) { developer }
context 'with a deployment' do
let!(:environment) { review_job.persisted_environment }
let!(:pipeline) { create(:ci_pipeline, project: project) }
let!(:review_job) { create(:ci_build, :with_deployment, :start_review_app, pipeline: pipeline, project: project) }
let!(:stop_review_job) { create(:ci_build, :with_deployment, :stop_review_app, :manual, pipeline: pipeline, project: project) }
before do
review_job.success!
end
context 'without stop action' do
let!(:environment) { create(:environment, :available, project: project) }
it 'stops the environment' do
expect { subject }.to change { environment.reload.state }.from('available').to('stopped')
end
end
it 'plays the stop action' do
expect { subject }.to change { stop_review_job.reload.status }.from('manual').to('pending')
end
context 'force option' do
let(:service) { described_class.new(project, user, { force: true }) }
it 'does not play the stop action when forced' do
expect { subject }.to change { environment.reload.state }.from('available').to('stopped')
expect(stop_review_job.reload.status).to eq('manual')
end
end
context 'when an environment has already been stopped' do
let!(:environment) { create(:environment, :stopped, project: project) }
it 'does not play the stop action' do
expect { subject }.not_to change { stop_review_job.reload.status }
end
end
end
context 'without a deployment' do
let!(:environment) { create(:environment, project: project) }
it 'stops the environment' do
expect { subject }.to change { environment.reload.state }.from('available').to('stopped')
end
context 'when the actor is a reporter' do
let(:user) { reporter }
it 'does not stop the environment' do
expect { subject }.not_to change { environment.reload.state }
end
end
end
end
describe '#execute_for_branch' do
context 'when environment with review app exists' do
context 'when user has permission to stop environment' do
before do
project.add_developer(user)
end
context 'when environment is associated with removed branch' do
it 'stops environment' do
expect_environment_stopping_on('feature', feature_environment)
end
end
context 'when environment is associated with different branch' do
it 'does not stop environment' do
expect_environment_not_stopped_on('master', feature_environment)
end
end
context 'when specified branch does not exist' do
it 'does not stop environment' do
expect_environment_not_stopped_on('non/existent/branch', feature_environment)
end
end
context 'when no branch is specified' do
it 'does not stop environment' do
expect_environment_not_stopped_on(nil, feature_environment)
end
end
context 'when environment is not stopped' do
before do
allow_next_found_instance_of(Environment) do |environment|
allow(environment).to receive(:state).and_return(:stopped)
end
end
it 'does not stop environment' do
expect_environment_not_stopped_on('feature', feature_environment)
end
end
end
context 'when user does not have permission to stop environment' do
context 'when user has no access to manage deployments' do
before do
project.add_guest(user)
end
it 'does not stop environment' do
expect_environment_not_stopped_on('master', feature_environment)
end
end
end
context 'when branch for stop action is protected' do
before do
project.add_developer(user)
create(:protected_branch, :no_one_can_push, name: 'master', project: project)
end
it 'does not stop environment' do
expect_environment_not_stopped_on('master', feature_environment)
end
end
end
context 'when there is no environment associated with review app' do
before do
create(:environment, project: project)
end
context 'when user has permission to stop environments' do
before do
project.add_maintainer(user)
end
it 'does not stop environment' do
expect_environment_not_stopped_on('master', feature_environment)
end
end
end
context 'when environment does not exist' do
it 'does not raise error' do
expect { service.execute_for_branch('master') }
.not_to raise_error
end
end
end
describe '#execute_for_merge_request_pipeline' do
subject { service.execute_for_merge_request_pipeline(merge_request) }
let(:merge_request) { create(:merge_request, source_branch: 'feature', target_branch: 'master') }
let(:project) { merge_request.project }
let(:user) { create(:user) }
let(:pipeline) do
create(:ci_pipeline,
source: :merge_request_event,
merge_request: merge_request,
project: project,
sha: merge_request.diff_head_sha,
merge_requests_as_head_pipeline: [merge_request])
end
let!(:review_job) { create(:ci_build, :with_deployment, :start_review_app, :success, pipeline: pipeline, project: project) }
let!(:stop_review_job) { create(:ci_build, :with_deployment, :stop_review_app, :manual, pipeline: pipeline, project: project) }
before do
review_job.deployment.success!
end
it 'has active environment at first' do
expect(pipeline.environments_in_self_and_project_descendants.first).to be_available
end
context 'when user is a developer' do
before do
project.add_developer(user)
end
context 'and merge request has associated created_environments' do
let!(:environment1) { create(:environment, project: project, merge_request: merge_request) }
let!(:environment2) { create(:environment, project: project, merge_request: merge_request) }
let!(:environment3) { create(:environment, project: project) }
let!(:environment3_deployment) { create(:deployment, environment: environment3, sha: pipeline.sha) }
before do
subject
end
it 'stops the associated created_environments' do
expect(environment1.reload).to be_stopped
expect(environment2.reload).to be_stopped
end
it 'does not affect environments that are not associated to the merge request' do
expect(environment3.reload).to be_available
end
end
it 'stops the active environment' do
subject
expect(pipeline.environments_in_self_and_project_descendants.first).to be_stopping
end
context 'when pipeline is a branch pipeline for merge request' do
let(:pipeline) do
create(:ci_pipeline,
source: :push,
project: project,
sha: merge_request.diff_head_sha,
merge_requests_as_head_pipeline: [merge_request])
end
it 'does not stop the active environment' do
subject
expect(pipeline.environments_in_self_and_project_descendants.first).to be_available
end
end
context 'with environment related jobs' do
let!(:environment) { create(:environment, :available, name: 'staging', project: project) }
let!(:prepare_staging_job) { create(:ci_build, :prepare_staging, pipeline: pipeline, project: project) }
let!(:start_staging_job) { create(:ci_build, :start_staging, :with_deployment, :manual, pipeline: pipeline, project: project) }
let!(:stop_staging_job) { create(:ci_build, :stop_staging, :manual, pipeline: pipeline, project: project) }
it 'does not stop environments that was not started by the merge request' do
subject
expect(prepare_staging_job.persisted_environment.state).to eq('available')
end
end
end
context 'when user is a reporter' do
before do
project.add_reporter(user)
end
it 'does not stop the active environment' do
subject
expect(pipeline.environments_in_self_and_project_descendants.first).to be_available
end
end
context 'when pipeline is not associated with environments' do
let!(:job) { create(:ci_build, pipeline: pipeline, project: project) }
it 'does not raise exception' do
expect { subject }.not_to raise_exception
end
end
context 'when pipeline is not a pipeline for merge request' do
let(:pipeline) do
create(:ci_pipeline,
project: project,
ref: 'feature',
sha: merge_request.diff_head_sha,
merge_requests_as_head_pipeline: [merge_request])
end
it 'does not stop the active environment' do
subject
expect(pipeline.environments_in_self_and_project_descendants.first).to be_available
end
end
end
def expect_environment_stopped_on(branch, environment)
expect { service.execute_for_branch(branch) }
.to change { environment.reload.state }.from('available').to('stopped')
end
def expect_environment_stopping_on(branch, environment)
expect { service.execute_for_branch(branch) }
.to change { environment.reload.state }.from('available').to('stopping')
end
def expect_environment_not_stopped_on(branch, environment)
expect { service.execute_for_branch(branch) }
.not_to change { environment.reload.state }.from('available')
end
def feature_environment
create(:environment, :with_review_app, project: project, ref: 'feature')
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class AutoRecoverService
include ::Gitlab::ExclusiveLeaseHelpers
include ::Gitlab::LoopHelpers
BATCH_SIZE = 100
LOOP_TIMEOUT = 45.minutes
LOOP_LIMIT = 1000
EXCLUSIVE_LOCK_KEY = 'environments:auto_recover:lock'
LOCK_TIMEOUT = 50.minutes
##
# Recover environments that are stuck stopping on a GitLab instance
#
# This auto stop process cannot run for more than 45 minutes. This is for
# preventing multiple `AutoStopCronWorker` CRON jobs run concurrently,
# which is scheduled at every hour.
def execute
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
recover_in_batch
end
end
end
private
def recover_in_batch
environments = Environment.preload_project.select(:id, :project_id).long_stopping.limit(BATCH_SIZE)
return false if environments.empty?
Environments::AutoRecoverWorker.bulk_perform_async_with_contexts(
environments,
arguments_proc: ->(environment) { environment.id },
context_proc: ->(environment) { { project: environment.project } }
)
true
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Environments::AutoRecoverService, :clean_gitlab_redis_shared_state, :sidekiq_inline,
feature_category: :continuous_delivery do
include CreateEnvironmentsHelpers
include ExclusiveLeaseHelpers
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let(:service) { described_class.new }
before_all do
project.add_developer(user)
end
describe '#execute' do
subject { service.execute }
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let(:environments) { Environment.all }
before_all do
project.add_developer(user)
project.repository.add_branch(user, 'review/feature-1', 'master')
project.repository.add_branch(user, 'review/feature-2', 'master')
end
before do
create_review_app(user, project, 'review/feature-1')
create_review_app(user, project, 'review/feature-2')
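# Drop each stop action and force the environments into a 'stopping' state
# older than Environment::LONG_STOP so they qualify for auto-recovery.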
Environment.all.map do |e|
e.stop_actions.map(&:drop)
e.stop!
e.update!(updated_at: (Environment::LONG_STOP + 1.day).ago)
e.reload
end
end
it 'stops environments that have been stuck stopping too long' do
expect { subject }
.to change { Environment.all.map(&:state).uniq }
.from(['stopping']).to(['available'])
end
it 'schedules stop processes in bulk' do
args = [[Environment.find_by_name('review/feature-1').id], [Environment.find_by_name('review/feature-2').id]]
expect(Environments::AutoRecoverWorker)
.to receive(:bulk_perform_async).with(args).once.and_call_original
subject
end
context 'when the other sidekiq worker has already been running' do
before do
stub_exclusive_lease_taken(described_class::EXCLUSIVE_LOCK_KEY)
end
it 'does not execute recover_in_batch' do
expect_next_instance_of(described_class) do |service|
expect(service).not_to receive(:recover_in_batch)
end
expect { subject }.to raise_error(Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError)
end
end
context 'when loop reached timeout' do
before do
stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds)
stub_const("#{described_class}::LOOP_LIMIT", 100_000)
allow_next_instance_of(described_class) do |service|
allow(service).to receive(:recover_in_batch).and_return(true)
end
end
it 'returns false and does not continue the process' do
is_expected.to eq(false)
end
end
context 'when loop reached loop limit' do
before do
stub_const("#{described_class}::LOOP_LIMIT", 1)
stub_const("#{described_class}::BATCH_SIZE", 1)
end
it 'stops only one available environment' do
expect { subject }.to change { Environment.long_stopping.count }.by(-1)
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class DestroyService < BaseService
def execute(environment)
unless can?(current_user, :destroy_environment, environment)
return ServiceResponse.error(
message: 'Unauthorized to delete the environment'
)
end
environment.destroy
unless environment.destroyed?
return ServiceResponse.error(
message: 'Attempted to destroy the environment but failed'
)
end
ServiceResponse.success
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Environments::DestroyService, feature_category: :continuous_delivery do
include CreateEnvironmentsHelpers
let_it_be(:project) { create(:project, :private, :repository) }
let_it_be(:user) { create(:user) }
let(:service) { described_class.new(project, user) }
describe '#execute' do
subject { service.execute(environment) }
let_it_be(:project) { create(:project, :private, :repository) }
let_it_be(:developer) { create(:user).tap { |u| project.add_developer(u) } }
let_it_be(:reporter) { create(:user).tap { |u| project.add_reporter(u) } }
let(:user) { developer }
let!(:environment) { create(:environment, project: project, state: :stopped) }
context "when destroy is authorized" do
it 'destroys the environment' do
expect { subject }.to change { environment.destroyed? }.from(false).to(true)
end
end
context "when destroy is not authorized" do
let(:user) { reporter }
it 'does not destroy the environment' do
expect { subject }.not_to change { environment.destroyed? }
end
end
context "when destroy fails" do
before do
allow(environment)
.to receive(:destroy)
.and_return(false)
end
it 'returns errors' do
expect(subject.message).to include("Attemped to destroy the environment but failed")
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class UpdateService < BaseService
ALLOWED_ATTRIBUTES = %i[external_url tier cluster_agent kubernetes_namespace flux_resource_path].freeze
def execute(environment)
unless can?(current_user, :update_environment, environment)
return ServiceResponse.error(
message: _('Unauthorized to update the environment'),
payload: { environment: environment }
)
end
if unauthorized_cluster_agent?
return ServiceResponse.error(
message: _('Unauthorized to access the cluster agent in this project'),
payload: { environment: environment })
end
if environment.update(**params.slice(*ALLOWED_ATTRIBUTES))
ServiceResponse.success(payload: { environment: environment })
else
ServiceResponse.error(
message: environment.errors.full_messages,
payload: { environment: environment }
)
end
end
private
def unauthorized_cluster_agent?
return false unless params[:cluster_agent]
::Clusters::Agents::Authorizations::UserAccess::Finder
.new(current_user, agent: params[:cluster_agent], project: project)
.execute
.empty?
end
end
end
``` | # frozen_string_literal: true
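`params.slice(*ALLOWED_ATTRIBUTES)` is a plain `Hash#slice` allow-list, which is why the disallowed `slug` parameter in the spec below is ignored rather than rejected:
```ruby
params = { external_url: 'https://example.com', slug: 'prod' }
params.slice(:external_url, :tier) # => { external_url: "https://example.com" }
```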
require 'spec_helper'
RSpec.describe Environments::UpdateService, feature_category: :environment_management do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:developer) { create(:user).tap { |u| project.add_developer(u) } }
let_it_be(:reporter) { create(:user).tap { |u| project.add_reporter(u) } }
let_it_be(:environment) { create(:environment, project: project) }
let(:service) { described_class.new(project, current_user, params) }
let(:current_user) { developer }
let(:params) { {} }
describe '#execute' do
subject { service.execute(environment) }
let(:params) { { external_url: 'https://gitlab.com/' } }
it 'updates the external URL' do
expect { subject }.to change { environment.reload.external_url }.to('https://gitlab.com/')
end
it 'returns successful response' do
response = subject
expect(response).to be_success
expect(response.payload[:environment]).to eq(environment)
end
context 'when setting a kubernetes namespace to the environment' do
let(:params) { { kubernetes_namespace: 'default' } }
it 'updates the kubernetes namespace' do
expect { subject }.to change { environment.reload.kubernetes_namespace }.to('default')
end
it 'returns successful response' do
response = subject
expect(response).to be_success
expect(response.payload[:environment]).to eq(environment)
end
end
context 'when setting a flux resource path to the environment' do
let(:params) { { flux_resource_path: 'path/to/flux/resource' } }
it 'updates the flux resource path' do
expect { subject }.to change { environment.reload.flux_resource_path }.to('path/to/flux/resource')
end
it 'returns successful response' do
response = subject
expect(response).to be_success
expect(response.payload[:environment]).to eq(environment)
end
end
context 'when setting a cluster agent to the environment' do
let_it_be(:agent_management_project) { create(:project) }
let_it_be(:cluster_agent) { create(:cluster_agent, project: agent_management_project) }
let!(:authorization) { create(:agent_user_access_project_authorization, project: project, agent: cluster_agent) }
let(:params) { { cluster_agent: cluster_agent } }
it 'returns successful response' do
response = subject
expect(response).to be_success
expect(response.payload[:environment].cluster_agent).to eq(cluster_agent)
end
context 'when user does not have permission to read the agent' do
let!(:authorization) { nil }
it 'returns an error' do
response = subject
expect(response).to be_error
expect(response.message).to eq('Unauthorized to access the cluster agent in this project')
expect(response.payload[:environment]).to eq(environment)
end
end
end
context 'when unsetting a cluster agent of the environment' do
let_it_be(:cluster_agent) { create(:cluster_agent, project: project) }
let(:params) { { cluster_agent: nil } }
before do
environment.update!(cluster_agent: cluster_agent)
end
it 'returns successful response' do
response = subject
expect(response).to be_success
expect(response.payload[:environment].cluster_agent).to be_nil
end
end
context 'when params contain invalid value' do
let(:params) { { external_url: 'http://${URL}' } }
it 'returns an error' do
response = subject
expect(response).to be_error
expect(response.message).to match_array("External url URI is invalid")
expect(response.payload[:environment]).to eq(environment)
end
end
context 'when disallowed parameter is passed' do
let(:params) { { external_url: 'https://gitlab.com/', slug: 'prod' } }
it 'ignores the parameter' do
response = subject
expect(response).to be_success
expect(response.payload[:environment].external_url).to eq('https://gitlab.com/')
expect(response.payload[:environment].slug).not_to eq('prod')
end
end
context 'when user is reporter' do
let(:current_user) { reporter }
it 'returns an error' do
response = subject
expect(response).to be_error
expect(response.message).to eq('Unauthorized to update the environment')
expect(response.payload[:environment]).to eq(environment)
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class CreateService < BaseService
ALLOWED_ATTRIBUTES = %i[name external_url tier cluster_agent kubernetes_namespace flux_resource_path].freeze
def execute
unless can?(current_user, :create_environment, project)
return ServiceResponse.error(
message: _('Unauthorized to create an environment'),
payload: { environment: nil }
)
end
if unauthorized_cluster_agent?
return ServiceResponse.error(
message: _('Unauthorized to access the cluster agent in this project'),
payload: { environment: nil })
end
environment = project.environments.create(**params.slice(*ALLOWED_ATTRIBUTES))
if environment.persisted?
ServiceResponse.success(payload: { environment: environment })
else
ServiceResponse.error(
message: environment.errors.full_messages,
payload: { environment: nil }
)
end
end
private
def unauthorized_cluster_agent?
return false unless params[:cluster_agent]
::Clusters::Agents::Authorizations::UserAccess::Finder
.new(current_user, agent: params[:cluster_agent], project: project)
.execute
.empty?
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Environments::CreateService, feature_category: :environment_management do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:developer) { create(:user).tap { |u| project.add_developer(u) } }
let_it_be(:reporter) { create(:user).tap { |u| project.add_reporter(u) } }
let(:service) { described_class.new(project, current_user, params) }
let(:current_user) { developer }
let(:params) { {} }
describe '#execute' do
subject { service.execute }
let(:params) { { name: 'production', external_url: 'https://gitlab.com', tier: :production, kubernetes_namespace: 'default', flux_resource_path: 'path/to/flux/resource' } }
it 'creates an environment' do
expect { subject }.to change { ::Environment.count }.by(1)
end
it 'returns successful response' do
response = subject
expect(response).to be_success
expect(response.payload[:environment].name).to eq('production')
expect(response.payload[:environment].external_url).to eq('https://gitlab.com')
expect(response.payload[:environment].tier).to eq('production')
expect(response.payload[:environment].kubernetes_namespace).to eq('default')
expect(response.payload[:environment].flux_resource_path).to eq('path/to/flux/resource')
end
context 'with a cluster agent' do
let_it_be(:agent_management_project) { create(:project) }
let_it_be(:cluster_agent) { create(:cluster_agent, project: agent_management_project) }
let!(:authorization) { create(:agent_user_access_project_authorization, project: project, agent: cluster_agent) }
let(:params) { { name: 'production', cluster_agent: cluster_agent } }
it 'returns successful response' do
response = subject
expect(response).to be_success
expect(response.payload[:environment].cluster_agent).to eq(cluster_agent)
end
context 'when user does not have permission to read the agent' do
let!(:authorization) { nil }
it 'returns an error' do
response = subject
expect(response).to be_error
expect(response.message).to eq('Unauthorized to access the cluster agent in this project')
expect(response.payload[:environment]).to be_nil
end
end
end
context 'when params contain invalid value' do
let(:params) { { name: 'production', external_url: 'http://${URL}' } }
it 'does not create an environment' do
expect { subject }.not_to change { ::Environment.count }
end
it 'returns an error' do
response = subject
expect(response).to be_error
expect(response.message).to match_array("External url URI is invalid")
expect(response.payload[:environment]).to be_nil
end
end
context 'when disallowed parameter is passed' do
let(:params) { { name: 'production', slug: 'prod' } }
it 'ignores the parameter' do
response = subject
expect(response).to be_success
expect(response.payload[:environment].name).to eq('production')
expect(response.payload[:environment].slug).not_to eq('prod')
end
end
context 'when user is reporter' do
let(:current_user) { reporter }
it 'does not create an environment' do
expect { subject }.not_to change { ::Environment.count }
end
it 'returns an error' do
response = subject
expect(response).to be_error
expect(response.message).to eq('Unauthorized to create an environment')
expect(response.payload[:environment]).to be_nil
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
class StopStaleService < BaseService
def execute
return ServiceResponse.error(message: 'Before date must be provided') unless params[:before].present?
return ServiceResponse.error(message: 'Unauthorized') unless can?(current_user, :stop_environment, project)
Environment.available
.deployed_and_updated_before(project.id, params[:before])
.without_protected(project)
.in_batches(of: 100) do |env_batch| # rubocop:disable Cop/InBatches
Environments::AutoStopWorker.bulk_perform_async_with_contexts(
env_batch,
arguments_proc: ->(environment) { environment.id },
context_proc: ->(environment) { { project: project } }
)
end
ServiceResponse.success(message: 'Successfully requested stop for all stale environments')
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Environments::StopStaleService,
:clean_gitlab_redis_shared_state,
:sidekiq_inline,
feature_category: :continuous_delivery do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let(:params) { { after: nil } }
let(:service) { described_class.new(project, user, params) }
describe '#execute' do
subject { service.execute }
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let_it_be(:stale_environment) { create(:environment, project: project, updated_at: 2.weeks.ago) }
let_it_be(:stale_environment2) { create(:environment, project: project, updated_at: 2.weeks.ago) }
let_it_be(:recent_environment) { create(:environment, project: project, updated_at: Date.today) }
let_it_be(:params) { { before: 1.week.ago } }
before do
allow(service).to receive(:can?).with(user, :stop_environment, project).and_return(true)
end
it 'only stops stale environments' do
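# With :sidekiq_inline the worker runs synchronously, so stubbing
# Environments::AutoStopWorker.new lets us spy on each perform call.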
spy_service = Environments::AutoStopWorker.new
allow(Environments::AutoStopWorker).to receive(:new) { spy_service }
expect(spy_service).to receive(:perform).with(stale_environment.id).and_call_original
expect(spy_service).to receive(:perform).with(stale_environment2.id).and_call_original
expect(spy_service).not_to receive(:perform).with(recent_environment.id)
expect(Environment).to receive(:deployed_and_updated_before).with(project.id, params[:before]).and_call_original
expect(Environment).to receive(:without_protected).with(project).and_call_original
expect(subject.success?).to be_truthy
expect(stale_environment.reload).to be_stopped
expect(stale_environment2.reload).to be_stopped
expect(recent_environment.reload).to be_available
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Environments
module CanaryIngress
class UpdateService < ::BaseService
def execute_async(environment)
result = validate(environment)
return result unless result[:status] == :success
Environments::CanaryIngress::UpdateWorker.perform_async(environment.id, params)
success
end
# This method actually executes the PATCH request to Kubernetes,
# that is used by internal processes i.e. sidekiq worker.
# You should always use `execute_async` to properly validate user's requests.
def execute(environment)
canary_ingress = environment.ingresses&.find(&:canary?)
unless canary_ingress.present?
return error(_('Canary Ingress does not exist in the environment.'))
end
if environment.patch_ingress(canary_ingress, patch_data)
environment.clear_all_caches
success
else
error(_('Failed to update the Canary Ingress.'), :bad_request)
end
end
private
def validate(environment)
unless can?(current_user, :update_environment, environment)
return error(_('You do not have permission to update the environment.'))
end
unless params[:weight].is_a?(Integer) && (0..100).cover?(params[:weight])
return error(_('Canary weight must be specified and valid range (0..100).'))
end
if environment.has_running_deployments?
return error(_('There are running deployments on the environment. Please retry later.'))
end
if ::Gitlab::ApplicationRateLimiter.throttled?(:update_environment_canary_ingress, scope: [environment])
return error(_("This environment's canary ingress has been updated recently. Please retry later."))
end
success
end
def patch_data
{
metadata: {
annotations: {
Gitlab::Kubernetes::Ingress::ANNOTATION_KEY_CANARY_WEIGHT => params[:weight].to_s
}
}
}
end
end
end
end
``` | # frozen_string_literal: true
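For reference, the PATCH body built for `weight: 50` reduces to the hash below; the annotation key string is an assumption about what `Gitlab::Kubernetes::Ingress::ANNOTATION_KEY_CANARY_WEIGHT` resolves to:
```ruby
# Assumed expansion of patch_data for params[:weight] == 50
patch_data = {
  metadata: {
    annotations: {
      'nginx.ingress.kubernetes.io/canary-weight' => '50' # assumed key
    }
  }
}
```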
require 'spec_helper'
RSpec.describe Environments::CanaryIngress::UpdateService, :clean_gitlab_redis_cache,
feature_category: :continuous_delivery do
include KubernetesHelpers
let_it_be(:project, refind: true) { create(:project) }
let_it_be(:maintainer) { create(:user) }
let_it_be(:reporter) { create(:user) }
let(:user) { maintainer }
let(:params) { {} }
let(:service) { described_class.new(project, user, params) }
before_all do
project.add_maintainer(maintainer)
project.add_reporter(reporter)
end
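# Shared example parameterized by the caller: each `it_behaves_like 'failed request'`
# block supplies the expected `message` via a nested let.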
shared_examples_for 'failed request' do
it 'returns an error' do
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to eq(message)
end
end
describe '#execute_async' do
subject { service.execute_async(environment) }
let(:environment) { create(:environment, project: project) }
let(:params) { { weight: 50 } }
let(:canary_ingress) { ::Gitlab::Kubernetes::Ingress.new(kube_ingress(track: :canary)) }
context 'when the actor does not have permission to update environment' do
let(:user) { reporter }
it_behaves_like 'failed request' do
let(:message) { "You do not have permission to update the environment." }
end
end
context 'when weight parameter is invalid' do
let(:params) { { weight: 'unknown' } }
it_behaves_like 'failed request' do
let(:message) { 'Canary weight must be specified and valid range (0..100).' }
end
end
context 'when no parameters exist' do
let(:params) { {} }
it_behaves_like 'failed request' do
let(:message) { 'Canary weight must be specified and valid range (0..100).' }
end
end
context 'when environment has a running deployment' do
before do
allow(environment).to receive(:has_running_deployments?) { true }
end
it_behaves_like 'failed request' do
let(:message) { 'There are running deployments on the environment. Please retry later.' }
end
end
context 'when canary ingress was updated recently' do
before do
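# Stubbing the rate limiter wholesale simulates a recent canary update; per the
# service, the real throttle is :update_environment_canary_ingress scoped to the environment.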
allow(::Gitlab::ApplicationRateLimiter).to receive(:throttled?) { true }
end
it_behaves_like 'failed request' do
let(:message) { "This environment's canary ingress has been updated recently. Please retry later." }
end
end
end
describe '#execute' do
subject { service.execute(environment) }
let(:environment) { create(:environment, project: project) }
let(:params) { { weight: 50 } }
let(:canary_ingress) { ::Gitlab::Kubernetes::Ingress.new(kube_ingress(track: :canary)) }
context 'when canary ingress is present in the environment' do
before do
allow(environment).to receive(:ingresses) { [canary_ingress] }
end
context 'when patch request succeeds' do
let(:patch_data) do
{
metadata: {
annotations: {
Gitlab::Kubernetes::Ingress::ANNOTATION_KEY_CANARY_WEIGHT => params[:weight].to_s
}
}
}
end
before do
allow(environment).to receive(:patch_ingress).with(canary_ingress, patch_data) { true }
end
it 'returns success' do
expect(subject[:status]).to eq(:success)
expect(subject[:message]).to be_nil
end
it 'clears all caches' do
expect(environment).to receive(:clear_all_caches)
subject
end
end
context 'when patch request does not succeed' do
before do
allow(environment).to receive(:patch_ingress) { false }
end
it_behaves_like 'failed request' do
let(:message) { 'Failed to update the Canary Ingress.' }
end
end
end
context 'when canary ingress is not present in the environment' do
it_behaves_like 'failed request' do
let(:message) { 'Canary Ingress does not exist in the environment.' }
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class ImportService < BaseService
Error = Class.new(StandardError)
PermissionError = Class.new(StandardError)
# Returns true if this importer is supposed to perform its work in the
# background.
#
# This method will only return `true` if async importing is explicitly
# supported by an importer class (`Gitlab::GithubImport::ParallelImporter`
# for example).
def async?
has_importer? && !!importer_class.try(:async?)
end
def execute
track_start_import
add_repository_to_project
validate_repository_size!
download_lfs_objects
import_data
after_execute_hook
success
rescue Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError, StandardError => e
Gitlab::Import::ImportFailureService.track(
project_id: project.id,
error_source: self.class.name,
exception: e,
metrics: true
)
message = Projects::ImportErrorFilter.filter_message(e.message)
error(
s_(
"ImportProjects|Error importing repository %{project_safe_import_url} into %{project_full_path} - %{message}"
) % { project_safe_import_url: project.safe_import_url, project_full_path: project.full_path, message: message }
)
end
protected
def extra_attributes_for_measurement
{
current_user: current_user&.name,
project_full_path: project&.full_path,
import_type: project&.import_type,
file_path: project&.import_source
}
end
private
attr_reader :resolved_address
def validate_repository_size!
# Defined in EE::Projects::ImportService
end
def after_execute_hook
# Defined in EE::Projects::ImportService
end
def track_start_import
has_importer? && importer_class.try(:track_start_import, project)
end
def add_repository_to_project
if project.external_import? && !unknown_url?
begin
@resolved_address = get_resolved_address
rescue Gitlab::HTTP_V2::UrlBlocker::BlockedUrlError => e
raise e, s_("ImportProjects|Blocked import URL: %{message}") % { message: e.message }
end
end
# We should skip the repository for a GitHub import or GitLab project import,
# because these importers fetch the project repositories for us.
return if importer_imports_repository?
if unknown_url?
# In this case, we only want to import issues, not a repository.
create_repository
elsif !project.repository_exists?
import_repository
end
end
def create_repository
unless project.create_repository
raise Error, s_('ImportProjects|The repository could not be created.')
end
end
def import_repository
refmap = importer_class.try(:refmap) if has_importer?
if refmap
project.ensure_repository
project.repository.fetch_as_mirror(project.import_url, refmap: refmap, resolved_address: resolved_address)
else
project.repository.import_repository(project.import_url, resolved_address: resolved_address)
end
rescue ::Gitlab::Git::CommandError => e
# Expire cache to prevent scenarios such as:
# 1. First import failed, but the repo was imported successfully, so +exists?+ returns true
# 2. Retried import, repo is broken or not imported but +exists?+ still returns true
project.repository.expire_content_cache if project.repository_exists?
raise Error, e.message
end
def download_lfs_objects
# In this case, we only want to import issues
return if unknown_url?
# If it has its own repository importer, it has to implements its own lfs import download
return if importer_imports_repository?
return unless project.lfs_enabled?
result = Projects::LfsPointers::LfsImportService.new(project).execute
if result[:status] == :error
# To avoid aborting the importing process, we silently fail
# if any exception raises.
Gitlab::AppLogger.error("The Lfs import process failed. #{result[:message]}")
end
end
def import_data
return unless has_importer?
project.repository.expire_content_cache unless project.gitlab_project_import?
unless importer.execute
raise Error, s_('ImportProjects|The remote data could not be imported.')
end
end
def importer_class
@importer_class ||= Gitlab::ImportSources.importer(project.import_type)
end
def has_importer?
Gitlab::ImportSources.importer_names.include?(project.import_type)
end
def importer
importer_class.new(project)
end
def unknown_url?
project.import_url == Project::UNKNOWN_IMPORT_URL
end
def importer_imports_repository?
has_importer? && importer_class.try(:imports_repository?)
end
def get_resolved_address
Gitlab::UrlBlocker
.validate!(
project.import_url,
schemes: Project::VALID_IMPORT_PROTOCOLS,
ports: Project::VALID_IMPORT_PORTS,
allow_localhost: allow_local_requests?,
allow_local_network: allow_local_requests?,
dns_rebind_protection: dns_rebind_protection?)
.then do |(import_url, resolved_host)|
next '' if resolved_host.nil? || !import_url.scheme.in?(%w[http https])
import_url.hostname.to_s
end
end
def allow_local_requests?
Rails.env.development? && # There is no known usecase for this in non-development environments
Gitlab::CurrentSettings.allow_local_requests_from_web_hooks_and_services?
end
def dns_rebind_protection?
return false if Gitlab.http_proxy_env?
Gitlab::CurrentSettings.dns_rebinding_protection_enabled?
end
end
end
Projects::ImportService.prepend_mod_with('Projects::ImportService')
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ImportService, feature_category: :importers do
let!(:project) { create(:project) }
let(:user) { project.creator }
subject { described_class.new(project, user) }
before do
allow(project).to receive(:lfs_enabled?).and_return(true)
end
describe '#async?' do
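# Bare doubles stand in for importer classes (e.g. Gitlab::GithubImport::ParallelImporter);
# only the async? contract matters for these examples.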
it 'returns true for an asynchronous importer' do
importer_class = double(:importer, async?: true)
allow(subject).to receive(:has_importer?).and_return(true)
allow(subject).to receive(:importer_class).and_return(importer_class)
expect(subject).to be_async
end
it 'returns false for a regular importer' do
importer_class = double(:importer, async?: false)
allow(subject).to receive(:has_importer?).and_return(true)
allow(subject).to receive(:importer_class).and_return(importer_class)
expect(subject).not_to be_async
end
it 'returns false when the importer does not define #async?' do
importer_class = double(:importer)
allow(subject).to receive(:has_importer?).and_return(true)
allow(subject).to receive(:importer_class).and_return(importer_class)
expect(subject).not_to be_async
end
it 'returns false when the importer does not exist' do
allow(subject).to receive(:has_importer?).and_return(false)
expect(subject).not_to be_async
end
end
describe '#execute' do
context 'with unknown url' do
before do
project.import_url = Project::UNKNOWN_IMPORT_URL
end
it 'succeeds if repository is created successfully' do
expect(project).to receive(:create_repository).and_return(true)
result = subject.execute
expect(result[:status]).to eq :success
end
it 'fails if repository creation fails' do
expect(project).to receive(:create_repository).and_return(false)
result = subject.execute
expect(result[:status]).to eq :error
expect(result[:message]).to eq "Error importing repository #{project.safe_import_url} into #{project.full_path} - The repository could not be created."
end
context 'when repository creation succeeds' do
it 'does not download lfs files' do
expect_any_instance_of(Projects::LfsPointers::LfsImportService).not_to receive(:execute)
subject.execute
end
end
end
context 'with known url' do
before do
project.import_url = 'https://github.com/vim/vim.git'
project.import_type = 'github'
end
context 'with a Github repository' do
it 'tracks the start of import' do
expect(Gitlab::GithubImport::ParallelImporter).to receive(:track_start_import)
subject.execute
end
it 'succeeds if repository import was scheduled' do
expect_any_instance_of(Gitlab::GithubImport::ParallelImporter)
.to receive(:execute)
.and_return(true)
result = subject.execute
expect(result[:status]).to eq :success
end
it 'fails if repository import was not scheduled' do
expect_any_instance_of(Gitlab::GithubImport::ParallelImporter)
.to receive(:execute)
.and_return(false)
result = subject.execute
expect(result[:status]).to eq :error
end
context 'when repository import scheduled' do
it 'does not download lfs objects' do
expect_any_instance_of(Projects::LfsPointers::LfsImportService).not_to receive(:execute)
subject.execute
end
end
end
context 'with a non Github repository' do
before do
project.import_url = 'https://bitbucket.org/vim/vim.git'
project.import_type = 'bitbucket'
end
context 'when importer supports refmap' do
before do
project.import_type = 'gitea'
end
it 'succeeds if repository fetch as mirror is successful' do
expect(project).to receive(:ensure_repository)
expect(project.repository).to receive(:fetch_as_mirror).with('https://bitbucket.org/vim/vim.git', refmap: Gitlab::LegacyGithubImport::Importer.refmap, resolved_address: '').and_return(true)
expect_next_instance_of(Gitlab::LegacyGithubImport::Importer) do |importer|
expect(importer).to receive(:execute).and_return(true)
end
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :success)
end
result = subject.execute
expect(result[:status]).to eq :success
end
it 'fails if repository fetch as mirror fails' do
expect(project).to receive(:ensure_repository)
expect(project.repository)
.to receive(:fetch_as_mirror)
.and_raise(Gitlab::Git::CommandError, 'Failed to import the repository /a/b/c')
result = subject.execute
expect(result[:status]).to eq :error
expect(result[:message]).to eq "Error importing repository #{project.safe_import_url} into #{project.full_path} - Failed to import the repository [FILTERED]"
end
end
context 'when importer does not support refmap' do
it 'succeeds if repository import is successful' do
expect_next_instance_of(Gitlab::BitbucketImport::ParallelImporter) do |importer|
expect(importer).to receive(:execute).and_return(true)
end
result = subject.execute
expect(result[:status]).to eq :success
end
it 'fails if repository import fails' do
expect_next_instance_of(Gitlab::BitbucketImport::ParallelImporter) do |importer|
expect(importer).to receive(:execute)
.and_raise(Gitlab::Git::CommandError, 'Failed to import the repository /a/b/c')
end
result = subject.execute
expect(result[:status]).to eq :error
expect(result[:message]).to eq "Error importing repository #{project.safe_import_url} into #{project.full_path} - Failed to import the repository [FILTERED]"
end
context 'when bitbucket_parallel_importer feature flag is disabled' do
before do
stub_feature_flags(bitbucket_parallel_importer: false)
end
it 'succeeds if repository import is successful' do
expect(project.repository).to receive(:import_repository).and_return(true)
expect_next_instance_of(Gitlab::BitbucketImport::Importer) do |importer|
expect(importer).to receive(:execute).and_return(true)
end
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :success)
end
result = subject.execute
expect(result[:status]).to eq :success
end
it 'fails if repository import fails' do
expect(project.repository)
.to receive(:import_repository)
.with('https://bitbucket.org/vim/vim.git', resolved_address: '')
.and_raise(Gitlab::Git::CommandError, 'Failed to import the repository /a/b/c')
result = subject.execute
expect(result[:status]).to eq :error
expect(result[:message]).to eq "Error importing repository #{project.safe_import_url} into #{project.full_path} - Failed to import the repository [FILTERED]"
end
context 'when lfs import fails' do
it 'logs the error' do
error_message = 'error message'
expect(project.repository).to receive(:import_repository).and_return(true)
expect_next_instance_of(Gitlab::BitbucketImport::Importer) do |importer|
expect(importer).to receive(:execute).and_return(true)
end
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :error, message: error_message)
end
expect(Gitlab::AppLogger).to receive(:error).with("The Lfs import process failed. #{error_message}")
subject.execute
end
end
context 'when repository import scheduled' do
before do
expect(project.repository).to receive(:import_repository).and_return(true)
allow(subject).to receive(:import_data)
end
it 'downloads lfs objects if lfs_enabled is enabled for project' do
allow(project).to receive(:lfs_enabled?).and_return(true)
expect_any_instance_of(Projects::LfsPointers::LfsImportService).to receive(:execute)
subject.execute
end
it 'does not download lfs objects if lfs_enabled is not enabled for project' do
allow(project).to receive(:lfs_enabled?).and_return(false)
expect_any_instance_of(Projects::LfsPointers::LfsImportService).not_to receive(:execute)
subject.execute
end
end
end
end
end
end
context 'with valid importer' do
before do
provider = double(:provider).as_null_object
stub_omniauth_setting(providers: [provider])
project.import_url = 'https://github.com/vim/vim.git'
project.import_type = 'github'
allow(project).to receive(:import_data).and_return(double(:import_data).as_null_object)
end
it 'succeeds if importer succeeds' do
allow_any_instance_of(Gitlab::GithubImport::ParallelImporter)
.to receive(:execute).and_return(true)
result = subject.execute
expect(result[:status]).to eq :success
end
it 'fails if importer fails' do
allow_any_instance_of(Gitlab::GithubImport::ParallelImporter)
.to receive(:execute)
.and_return(false)
result = subject.execute
expect(result[:status]).to eq :error
end
context 'when importer' do
it 'does not download lfs objects when it has a custom repository importer' do
allow(Gitlab::GithubImport::ParallelImporter).to receive(:imports_repository?).and_return(true)
expect_any_instance_of(Projects::LfsPointers::LfsImportService).not_to receive(:execute)
subject.execute
end
it 'downloads lfs objects when it does not have a custom repository importer' do
allow(Gitlab::GithubImport::ParallelImporter).to receive(:imports_repository?).and_return(false)
expect_any_instance_of(Projects::LfsPointers::LfsImportService).to receive(:execute)
subject.execute
end
context 'when lfs import fails' do
it 'logs the error' do
error_message = 'error message'
allow(Gitlab::GithubImport::ParallelImporter).to receive(:imports_repository?).and_return(false)
expect_any_instance_of(Projects::LfsPointers::LfsImportService).to receive(:execute).and_return(status: :error, message: error_message)
expect(Gitlab::AppLogger).to receive(:error).with("The Lfs import process failed. #{error_message}")
subject.execute
end
end
end
end
context 'with a blocked import URL' do
it 'fails with localhost' do
project.import_url = 'https://localhost:9000/vim/vim.git'
result = described_class.new(project, user).execute
expect(result[:status]).to eq :error
expect(result[:message]).to include('Requests to localhost are not allowed')
end
it 'fails with port 25' do
project.import_url = "https://github.com:25/vim/vim.git"
result = subject.execute
expect(result[:status]).to eq :error
expect(result[:message]).to include('Only allowed ports are 80, 443')
end
it 'fails with file scheme' do
project.import_url = "file:///tmp/dir.git"
result = subject.execute
expect(result[:status]).to eq :error
expect(result[:message]).to include('Only allowed schemes are http, https')
end
end
context 'when import is a local request' do
before do
project.import_url = "http://127.0.0.1/group/project"
end
context 'when local network requests are enabled' do
before do
stub_application_setting(allow_local_requests_from_web_hooks_and_services: true)
end
it 'returns an error' do
expect(project.repository).not_to receive(:import_repository)
expect(subject.execute).to include(
status: :error,
message: end_with('Requests to localhost are not allowed')
)
end
context 'when environment is development' do
before do
stub_rails_env('development')
end
it 'imports successfully' do
expect(project.repository)
.to receive(:import_repository)
.and_return(true)
expect(subject.execute[:status]).to eq(:success)
end
end
end
end
context 'when DNS rebind protection is disabled' do
before do
allow(Gitlab::CurrentSettings).to receive(:dns_rebinding_protection_enabled?).and_return(false)
project.import_url = "https://example.com/group/project"
allow(Gitlab::UrlBlocker).to receive(:validate!)
.with(
project.import_url,
ports: Project::VALID_IMPORT_PORTS,
schemes: Project::VALID_IMPORT_PROTOCOLS,
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: false
)
.and_return([Addressable::URI.parse("https://example.com/group/project"), nil])
end
it 'imports repository with url without additional resolved address' do
expect(project.repository).to receive(:import_repository).with('https://example.com/group/project', resolved_address: '').and_return(true)
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :success)
end
result = subject.execute
expect(result[:status]).to eq(:success)
end
end
context 'when DNS rebind protection is enabled' do
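# Gitlab::UrlBlocker.validate! is stubbed to return the [uri, resolved_host] pair
# that the service unpacks into the resolved_address handed to the repository import.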
before do
allow(Gitlab::CurrentSettings).to receive(:http_proxy_env?).and_return(false)
allow(Gitlab::CurrentSettings).to receive(:dns_rebinding_protection_enabled?).and_return(true)
end
context 'when https url is provided' do
before do
project.import_url = "https://example.com/group/project"
allow(Gitlab::UrlBlocker).to receive(:validate!)
.with(
project.import_url,
ports: Project::VALID_IMPORT_PORTS,
schemes: Project::VALID_IMPORT_PROTOCOLS,
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true
)
.and_return([Addressable::URI.parse("https://172.16.123.1/group/project"), 'example.com'])
end
it 'imports repository with url and additional resolved address' do
expect(project.repository).to receive(:import_repository).with('https://example.com/group/project', resolved_address: '172.16.123.1').and_return(true)
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :success)
end
result = subject.execute
expect(result[:status]).to eq(:success)
end
context 'when host resolves to an IPv6 address' do
before do
project.import_url = 'https://gitlab.com/gitlab-org/gitlab-development-kit'
allow(Gitlab::UrlBlocker).to receive(:validate!)
.with(
project.import_url,
ports: Project::VALID_IMPORT_PORTS,
schemes: Project::VALID_IMPORT_PROTOCOLS,
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true
)
.and_return([Addressable::URI.parse('https://[2606:4700:90:0:f22e:fbec:5bed:a9b9]/gitlab-org/gitlab-development-kit'), 'gitlab.com'])
end
it 'imports repository with url and additional resolved bare IPv6 address' do
expect(project.repository).to receive(:import_repository).with('https://gitlab.com/gitlab-org/gitlab-development-kit', resolved_address: '2606:4700:90:0:f22e:fbec:5bed:a9b9').and_return(true)
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :success)
end
result = subject.execute
expect(result[:status]).to eq(:success)
end
end
end
context 'when http url is provided' do
before do
project.import_url = "http://example.com/group/project"
allow(Gitlab::UrlBlocker).to receive(:validate!)
.with(
project.import_url,
ports: Project::VALID_IMPORT_PORTS,
schemes: Project::VALID_IMPORT_PROTOCOLS,
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true
)
.and_return([Addressable::URI.parse("http://172.16.123.1/group/project"), 'example.com'])
end
it 'imports repository with url and additional resolved address' do
expect(project.repository).to receive(:import_repository).with('http://example.com/group/project', resolved_address: '172.16.123.1').and_return(true)
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :success)
end
result = subject.execute
expect(result[:status]).to eq(:success)
end
end
context 'when git address is provided' do
before do
project.import_url = "git://example.com/group/project.git"
allow(Gitlab::UrlBlocker).to receive(:validate!)
.with(
project.import_url,
ports: Project::VALID_IMPORT_PORTS,
schemes: Project::VALID_IMPORT_PROTOCOLS,
allow_local_network: false,
allow_localhost: false,
dns_rebind_protection: true
)
.and_return([Addressable::URI.parse("git://172.16.123.1/group/project"), 'example.com'])
end
it 'imports repository with url and without resolved address' do
expect(project.repository).to receive(:import_repository).with('git://example.com/group/project.git', resolved_address: '').and_return(true)
expect_next_instance_of(Projects::LfsPointers::LfsImportService) do |service|
expect(service).to receive(:execute).and_return(status: :success)
end
result = subject.execute
expect(result[:status]).to eq(:success)
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class for counting and caching the number of open merge requests of
# a project.
class OpenMergeRequestsCountService < Projects::CountService
def cache_key_name
'open_merge_requests_count'
end
def self.query(project_ids)
MergeRequest.opened.of_projects(project_ids)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::OpenMergeRequestsCountService, :use_clean_rails_memory_store_caching, feature_category: :code_review_workflow do
let_it_be(:project) { create(:project) }
subject { described_class.new(project) }
it_behaves_like 'a counter caching service'
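# 'a counter caching service' is a shared example group defined in the spec support
# files; it exercises the caching contract inherited from Projects::CountService.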
describe '#count' do
it 'returns the number of open merge requests' do
create(:merge_request, :opened, source_project: project, target_project: project)
expect(subject.count).to eq(1)
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class that can be used to execute actions necessary after creating a
# default branch.
class ProtectDefaultBranchService
attr_reader :project, :default_branch_protection
# @param [Project] project
def initialize(project)
@project = project
@default_branch_protection = Gitlab::Access::BranchProtection
.new(project.namespace.default_branch_protection)
end
def execute
protect_default_branch if default_branch
end
def protect_default_branch
# Ensure HEAD points to the default branch in case it is not master
project.change_head(default_branch)
create_protected_branch if protect_branch? && !protected_branch_exists?
end
def create_protected_branch
params = {
name: default_branch,
push_access_levels_attributes: [{ access_level: push_access_level }],
merge_access_levels_attributes: [{ access_level: merge_access_level }]
}
# The creator of the project is always allowed to create protected
# branches, so we skip the authorization check in this service class.
ProtectedBranches::CreateService
.new(project, project.creator, params)
.execute(skip_authorization: true)
end
def protect_branch?
default_branch_protection.any? &&
!ProtectedBranch.protected?(project, default_branch)
end
def protected_branch_exists?
project.all_protected_branches.find_by_name(default_branch).present?
end
def default_branch
project.default_branch
end
def push_access_level
if default_branch_protection.developer_can_push?
Gitlab::Access::DEVELOPER
else
Gitlab::Access::MAINTAINER
end
end
def merge_access_level
if default_branch_protection.developer_can_merge?
Gitlab::Access::DEVELOPER
else
Gitlab::Access::MAINTAINER
end
end
end
end
Projects::ProtectDefaultBranchService.prepend_mod
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ProtectDefaultBranchService, feature_category: :source_code_management do
let(:service) { described_class.new(project) }
let(:project) { create(:project) }
describe '#execute' do
before do
allow(service)
.to receive(:protect_default_branch)
end
context 'without a default branch' do
it 'does nothing' do
allow(service)
.to receive(:default_branch)
.and_return(nil)
service.execute
expect(service)
.not_to have_received(:protect_default_branch)
end
end
context 'with a default branch' do
it 'protects the default branch' do
allow(service)
.to receive(:default_branch)
.and_return('master')
service.execute
expect(service)
.to have_received(:protect_default_branch)
end
end
end
describe '#protect_default_branch' do
before do
allow(service)
.to receive(:default_branch)
.and_return('master')
allow(project)
.to receive(:change_head)
.with('master')
allow(service)
.to receive(:create_protected_branch)
end
context 'when branch protection is needed' do
before do
allow(service)
.to receive(:protect_branch?)
.and_return(true)
allow(service)
.to receive(:create_protected_branch)
end
it 'changes the HEAD of the project' do
service.protect_default_branch
expect(project)
.to have_received(:change_head)
end
it 'protects the default branch' do
service.protect_default_branch
expect(service)
.to have_received(:create_protected_branch)
end
end
context 'when branch protection is not needed' do
before do
allow(service)
.to receive(:protect_branch?)
.and_return(false)
end
it 'changes the HEAD of the project' do
service.protect_default_branch
expect(project)
.to have_received(:change_head)
end
it 'does not protect the default branch' do
service.protect_default_branch
expect(service)
.not_to have_received(:create_protected_branch)
end
end
context 'when protected branch does not exist' do
before do
allow(service)
.to receive(:protected_branch_exists?)
.and_return(false)
allow(service)
.to receive(:protect_branch?)
.and_return(true)
end
it 'changes the HEAD of the project' do
service.protect_default_branch
expect(project)
.to have_received(:change_head)
end
it 'protects the default branch' do
service.protect_default_branch
expect(service)
.to have_received(:create_protected_branch)
end
end
context 'when protected branch already exists' do
before do
allow(service)
.to receive(:protected_branch_exists?)
.and_return(true)
end
it 'changes the HEAD of the project' do
service.protect_default_branch
expect(project)
.to have_received(:change_head)
end
it 'does not protect the default branch' do
service.protect_default_branch
expect(service)
.not_to have_received(:create_protected_branch)
end
end
end
describe '#create_protected_branch' do
it 'creates the protected branch' do
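# Pure collaboration test: every dependency is stubbed, so the only assertion is
# that ProtectedBranches::CreateService#execute runs with skip_authorization: true.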
creator = instance_spy(User)
create_service = instance_spy(ProtectedBranches::CreateService)
access_level = Gitlab::Access::DEVELOPER
params = {
name: 'master',
push_access_levels_attributes: [{ access_level: access_level }],
merge_access_levels_attributes: [{ access_level: access_level }]
}
allow(project)
.to receive(:creator)
.and_return(creator)
allow(ProtectedBranches::CreateService)
.to receive(:new)
.with(project, creator, params)
.and_return(create_service)
allow(service)
.to receive(:push_access_level)
.and_return(access_level)
allow(service)
.to receive(:merge_access_level)
.and_return(access_level)
allow(service)
.to receive(:default_branch)
.and_return('master')
allow(create_service)
.to receive(:execute)
.with(skip_authorization: true)
service.create_protected_branch
expect(create_service)
.to have_received(:execute)
end
end
describe '#protect_branch?' do
context 'when default branch protection is disabled' do
it 'returns false' do
allow(project.namespace)
.to receive(:default_branch_protection)
.and_return(Gitlab::Access::PROTECTION_NONE)
expect(service.protect_branch?).to eq(false)
end
end
context 'when default branch protection is enabled' do
before do
allow(project.namespace)
.to receive(:default_branch_protection)
.and_return(Gitlab::Access::PROTECTION_DEV_CAN_MERGE)
allow(service)
.to receive(:default_branch)
.and_return('master')
end
it 'returns false if the branch is already protected' do
allow(ProtectedBranch)
.to receive(:protected?)
.with(project, 'master')
.and_return(true)
expect(service.protect_branch?).to eq(false)
end
it 'returns true if the branch is not yet protected' do
allow(ProtectedBranch)
.to receive(:protected?)
.with(project, 'master')
.and_return(false)
expect(service.protect_branch?).to eq(true)
end
end
end
describe '#protected_branch_exists?' do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let(:default_branch) { "default-branch" }
before do
allow(project).to receive(:default_branch).and_return(default_branch)
create(:protected_branch, project: nil, group: group, name: default_branch)
end
context 'when feature flag `group_protected_branches` disabled' do
before do
stub_feature_flags(group_protected_branches: false)
stub_feature_flags(allow_protected_branches_for_group: false)
end
it 'returns false' do
expect(service.protected_branch_exists?).to eq(false)
end
end
context 'when feature flag `group_protected_branches` enabled' do
before do
stub_feature_flags(group_protected_branches: true)
stub_feature_flags(allow_protected_branches_for_group: true)
end
it 'returns true' do
expect(service.protected_branch_exists?).to eq(true)
end
end
end
describe '#default_branch' do
it 'returns the default branch of the project' do
allow(project)
.to receive(:default_branch)
.and_return('master')
expect(service.default_branch).to eq('master')
end
end
describe '#push_access_level' do
context 'when developers can push' do
it 'returns the DEVELOPER access level' do
allow(project.namespace)
.to receive(:default_branch_protection)
.and_return(Gitlab::Access::PROTECTION_DEV_CAN_PUSH)
expect(service.push_access_level).to eq(Gitlab::Access::DEVELOPER)
end
end
context 'when developers can not push' do
it 'returns the MAINTAINER access level' do
allow(project.namespace)
.to receive(:default_branch_protection)
.and_return(Gitlab::Access::PROTECTION_DEV_CAN_MERGE)
expect(service.push_access_level).to eq(Gitlab::Access::MAINTAINER)
end
end
end
describe '#merge_access_level' do
context 'when developers can merge' do
it 'returns the DEVELOPER access level' do
allow(project.namespace)
.to receive(:default_branch_protection)
.and_return(Gitlab::Access::PROTECTION_DEV_CAN_MERGE)
expect(service.merge_access_level).to eq(Gitlab::Access::DEVELOPER)
end
end
context 'when developers can not merge' do
it 'returns the MAINTAINER access level' do
allow(project.namespace)
.to receive(:default_branch_protection)
.and_return(Gitlab::Access::PROTECTION_DEV_CAN_PUSH)
expect(service.merge_access_level).to eq(Gitlab::Access::MAINTAINER)
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class UpdateRepositoryStorageService
include UpdateRepositoryStorageMethods
delegate :project, to: :repository_storage_move
private
def track_repository(destination_storage_name)
project.update_column(:repository_storage, destination_storage_name)
# Connect project to pool repository from the new shard
project.swap_pool_repository!
# Connect project to the repository from the new shard
project.track_project_repository
# Link repository from the new shard to pool repository from the new shard
project.link_pool_repository if replicate_object_pool_on_move_ff_enabled?
end
def mirror_repositories
if project.repository_exists?
mirror_repository(type: Gitlab::GlRepository::PROJECT)
end
if project.wiki.repository_exists?
mirror_repository(type: Gitlab::GlRepository::WIKI)
end
if project.design_repository.exists?
mirror_repository(type: ::Gitlab::GlRepository::DESIGN)
end
end
def mirror_object_pool(destination_storage_name)
return unless replicate_object_pool_on_move_ff_enabled?
return unless project.repository_exists?
pool_repository = project.pool_repository
return unless pool_repository
# If pool repository already exists, then we will link the moved project repository to it
return if pool_repository_exists_for?(shard_name: destination_storage_name, pool_repository: pool_repository)
target_pool_repository = create_pool_repository_for!(
shard_name: destination_storage_name,
pool_repository: pool_repository
)
Repositories::ReplicateService.new(pool_repository.object_pool.repository)
.execute(target_pool_repository.object_pool.repository, :object_pool)
end
def remove_old_paths
super
if project.wiki.repository_exists?
Gitlab::Git::Repository.new(
source_storage_name,
"#{project.wiki.disk_path}.git",
nil,
nil
).remove
end
if project.design_repository.exists?
Gitlab::Git::Repository.new(
source_storage_name,
"#{project.design_repository.disk_path}.git",
nil,
nil
).remove
end
end
def pool_repository_exists_for?(shard_name:, pool_repository:)
PoolRepository.by_disk_path_and_shard_name(
pool_repository.disk_path,
shard_name
).exists?
end
def create_pool_repository_for!(shard_name:, pool_repository:)
# Set state `ready` because we manually replicate object pool
PoolRepository.create!(
shard: Shard.by_name(shard_name),
source_project: pool_repository.source_project,
disk_path: pool_repository.disk_path,
state: 'ready'
)
end
def replicate_object_pool_on_move_ff_enabled?
Feature.enabled?(:replicate_object_pool_on_move, project)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::UpdateRepositoryStorageService, feature_category: :source_code_management do
include Gitlab::ShellAdapter
subject { described_class.new(repository_storage_move) }
describe "#execute" do
let(:time) { Time.current }
before do
allow(Time).to receive(:now).and_return(time)
stub_storage_settings('test_second_storage' => {})
end
context 'without wiki and design repository' do
let!(:shard_default) { create(:shard, name: 'default') }
let!(:shard_second_storage) { create(:shard, name: 'test_second_storage') }
let(:project) { create(:project, :repository, wiki_enabled: false) }
let(:destination) { 'test_second_storage' }
let(:repository_storage_move) { create(:project_repository_storage_move, :scheduled, container: project, destination_storage_name: destination) }
let!(:checksum) { project.repository.checksum }
let(:project_repository_double) { double(:repository) }
let(:original_project_repository_double) { double(:repository) }
let(:object_pool_double) { double(:object_pool, repository: object_pool_repository_double) }
let(:object_pool_repository_double) { double(:repository) }
let(:original_object_pool_double) { double(:object_pool, repository: original_object_pool_repository_double) }
let(:original_object_pool_repository_double) { double(:repository) }
before do
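# Distinct filesystem ids make Gitaly treat the two shards as different storages;
# when the ids match, the service only updates the database (see the
# 'when the filesystems are the same' context below).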
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('default').and_call_original
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('test_second_storage').and_return(SecureRandom.uuid)
allow(Gitlab::Git::Repository).to receive(:new).and_call_original
allow(Gitlab::Git::Repository).to receive(:new)
.with('test_second_storage', project.repository.raw.relative_path, project.repository.gl_repository, project.repository.full_path)
.and_return(project_repository_double)
allow(Gitlab::Git::Repository).to receive(:new)
.with('default', project.repository.raw.relative_path, nil, nil)
.and_return(original_project_repository_double)
allow(Gitlab::Git::ObjectPool).to receive(:new).and_call_original
allow(Gitlab::Git::ObjectPool).to receive(:new)
.with('test_second_storage', anything, anything, anything)
.and_return(object_pool_double)
allow(Gitlab::Git::ObjectPool).to receive(:new)
.with('default', anything, anything, anything)
.and_return(original_object_pool_double)
allow(original_object_pool_double).to receive(:create)
allow(object_pool_double).to receive(:create)
end
context 'when the move succeeds' do
it 'moves the repository to the new storage and unmarks the repository as read-only' do
expect(project_repository_double).to receive(:replicate)
.with(project.repository.raw)
expect(project_repository_double).to receive(:checksum)
.and_return(checksum)
expect(original_project_repository_double).to receive(:remove)
result = subject.execute
project.reload
expect(result).to be_success
expect(project).not_to be_repository_read_only
expect(project.repository_storage).to eq('test_second_storage')
expect(project.project_repository.shard_name).to eq('test_second_storage')
expect(repository_storage_move.reload).to be_finished
expect(repository_storage_move.error_message).to be_nil
end
end
context 'when touch raises an exception' do
let(:exception) { RuntimeError.new('Boom') }
it 'marks the storage move as failed and restores read-write access' do
allow(repository_storage_move).to receive(:container).and_return(project)
allow(project).to receive(:touch).and_wrap_original do
project.assign_attributes(updated_at: 1.second.ago)
raise exception
end
expect(project_repository_double).to receive(:replicate)
.with(project.repository.raw)
expect(project_repository_double).to receive(:checksum)
.and_return(checksum)
expect { subject.execute }.to raise_error(exception)
project.reload
expect(project).not_to be_repository_read_only
expect(repository_storage_move.reload).to be_failed
expect(repository_storage_move.error_message).to eq('Boom')
end
end
context 'when the filesystems are the same' do
before do
expect(Gitlab::GitalyClient).to receive(:filesystem_id).twice.and_return(SecureRandom.uuid)
end
it 'updates the database without trying to move the repository', :aggregate_failures do
result = subject.execute
project.reload
expect(result).to be_success
expect(project).not_to be_repository_read_only
expect(project.repository_storage).to eq('test_second_storage')
expect(project.project_repository.shard_name).to eq('test_second_storage')
end
end
context 'when the move fails' do
it 'unmarks the repository as read-only without updating the repository storage' do
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('default').and_call_original
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('test_second_storage').and_return(SecureRandom.uuid)
expect(project_repository_double).to receive(:replicate)
.with(project.repository.raw)
.and_raise(Gitlab::Git::CommandError, 'Boom')
expect(project_repository_double).to receive(:remove)
expect do
subject.execute
end.to raise_error(Gitlab::Git::CommandError)
expect(project).not_to be_repository_read_only
expect(project.repository_storage).to eq('default')
expect(repository_storage_move).to be_failed
expect(repository_storage_move.error_message).to eq('Boom')
end
end
context 'when the cleanup fails' do
it 'sets the correct state' do
expect(project_repository_double).to receive(:replicate)
.with(project.repository.raw)
expect(project_repository_double).to receive(:checksum)
.and_return(checksum)
expect(original_project_repository_double).to receive(:remove)
.and_raise(Gitlab::Git::CommandError)
expect do
subject.execute
end.to raise_error(Gitlab::Git::CommandError)
expect(repository_storage_move).to be_cleanup_failed
end
end
context 'when the checksum does not match' do
it 'unmarks the repository as read-only without updating the repository storage' do
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('default').and_call_original
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('test_second_storage').and_return(SecureRandom.uuid)
expect(project_repository_double).to receive(:replicate)
.with(project.repository.raw)
expect(project_repository_double).to receive(:checksum)
.and_return('not matching checksum')
expect(project_repository_double).to receive(:remove)
expect do
subject.execute
end.to raise_error(Repositories::ReplicateService::Error, /Failed to verify project repository checksum/)
expect(project).not_to be_repository_read_only
expect(project.repository_storage).to eq('default')
end
end
context 'with repository pool' do
let(:shard_from) { shard_default }
let(:shard_to) { shard_second_storage }
let(:old_object_pool_checksum) { 'abcd' }
let(:new_object_pool_checksum) { old_object_pool_checksum }
before do
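# Happy-path defaults: replication is stubbed out and both object pool checksums
# match; contexts below override new_object_pool_checksum to exercise the failure path.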
allow(project_repository_double).to receive(:replicate).with(project.repository.raw)
allow(project_repository_double).to receive(:checksum).and_return(checksum)
allow(original_project_repository_double).to receive(:remove)
allow(object_pool_repository_double).to receive(:replicate).with(original_object_pool_repository_double)
allow(object_pool_repository_double).to receive(:checksum).and_return(new_object_pool_checksum)
allow(original_object_pool_repository_double).to receive(:checksum).and_return(old_object_pool_checksum)
allow(object_pool_double).to receive(:link) do |repository|
expect(repository.storage).to eq 'test_second_storage'
end
end
context 'when project had a repository pool' do
let!(:pool_repository) { create(:pool_repository, :ready, shard: shard_from, source_project: project) }
it 'creates a new repository pool and connects project to it' do
result = subject.execute
expect(result).to be_success
project.reload.cleanup
new_pool_repository = project.pool_repository
expect(new_pool_repository).not_to eq(pool_repository)
expect(new_pool_repository.shard).to eq(shard_second_storage)
expect(new_pool_repository.state).to eq('ready')
expect(new_pool_repository.disk_path).to eq(pool_repository.disk_path)
expect(new_pool_repository.source_project).to eq(project)
expect(object_pool_double).to have_received(:link).with(project.repository.raw)
end
context 'when feature flag replicate_object_pool_on_move is disabled' do
before do
stub_feature_flags(replicate_object_pool_on_move: false)
end
it 'just moves the repository without the object pool' do
result = subject.execute
expect(result).to be_success
project.reload.cleanup
new_pool_repository = project.pool_repository
expect(new_pool_repository).to eq(pool_repository)
expect(new_pool_repository.shard).to eq(shard_default)
expect(new_pool_repository.state).to eq('ready')
expect(new_pool_repository.source_project).to eq(project)
expect(object_pool_repository_double).not_to have_received(:replicate)
expect(object_pool_double).not_to have_received(:link)
end
end
context 'when new shard has a repository pool' do
let!(:new_pool_repository) { create(:pool_repository, :ready, shard: shard_to, source_project: project) }
it 'connects project to it' do
result = subject.execute
expect(result).to be_success
project.reload.cleanup
project_pool_repository = project.pool_repository
expect(project_pool_repository).to eq(new_pool_repository)
expect(object_pool_double).to have_received(:link).with(project.repository.raw)
end
end
context 'when new shard has a repository pool without the root project' do
let!(:new_pool_repository) { create(:pool_repository, :ready, shard: shard_to, disk_path: pool_repository.disk_path) }
before do
pool_repository.update!(source_project: nil)
new_pool_repository.update!(source_project: nil)
end
it 'connects project to it' do
result = subject.execute
expect(result).to be_success
project.reload.cleanup
project_pool_repository = project.pool_repository
expect(project_pool_repository).to eq(new_pool_repository)
expect(object_pool_double).to have_received(:link).with(project.repository.raw)
end
end
context 'when repository does not exist' do
let(:project) { create(:project) }
let(:checksum) { nil }
it 'does not mirror object pool' do
result = subject.execute
expect(result).to be_success
expect(object_pool_repository_double).not_to have_received(:replicate)
end
end
context 'when project belongs to repository pool, but not as a root project' do
let!(:another_project) { create(:project, :repository) }
let!(:pool_repository) { create(:pool_repository, :ready, shard: shard_from, source_project: another_project) }
before do
project.update!(pool_repository: pool_repository)
end
it 'creates a new repository pool and connects project to it' do
result = subject.execute
expect(result).to be_success
project.reload.cleanup
new_pool_repository = project.pool_repository
expect(new_pool_repository).not_to eq(pool_repository)
expect(new_pool_repository.shard).to eq(shard_second_storage)
expect(new_pool_repository.state).to eq('ready')
expect(new_pool_repository.source_project).to eq(another_project)
expect(object_pool_double).to have_received(:link).with(project.repository.raw)
end
end
context 'when project belongs to the repository pool without a root project' do
let!(:pool_repository) { create(:pool_repository, :ready, shard: shard_from) }
before do
pool_repository.update!(source_project: nil)
project.update!(pool_repository: pool_repository)
end
it 'creates a new repository pool without a root project and connects project to it' do
result = subject.execute
expect(result).to be_success
project.reload.cleanup
new_pool_repository = project.pool_repository
expect(new_pool_repository).not_to eq(pool_repository)
expect(new_pool_repository.shard).to eq(shard_second_storage)
expect(new_pool_repository.state).to eq('ready')
expect(new_pool_repository.source_project).to eq(nil)
expect(new_pool_repository.disk_path).to eq(pool_repository.disk_path)
expect(object_pool_double).to have_received(:link).with(project.repository.raw)
end
end
context 'when object pool checksum does not match' do
let(:new_object_pool_checksum) { 'not_match' }
it 'raises an error and removes the new object pool repository' do
expect(object_pool_repository_double).to receive(:remove)
original_count = PoolRepository.count
expect do
subject.execute
end.to raise_error(Repositories::ReplicateService::Error, /Failed to verify object_pool repository/)
project.reload
expect(PoolRepository.count).to eq(original_count)
expect(project.pool_repository).to eq(pool_repository)
expect(project.repository.shard).to eq('default')
end
end
end
end
context 'when the repository move is finished' do
let(:repository_storage_move) { create(:project_repository_storage_move, :finished, container: project, destination_storage_name: destination) }
it 'is idempotent' do
expect do
result = subject.execute
expect(result).to be_success
end.not_to change(repository_storage_move, :state)
end
end
context 'when the repository move is failed' do
let(:repository_storage_move) { create(:project_repository_storage_move, :failed, container: project, destination_storage_name: destination) }
it 'is idempotent' do
expect do
result = subject.execute
expect(result).to be_success
end.not_to change(repository_storage_move, :state)
end
end
end
context 'project with no repositories' do
let(:project) { create(:project) }
let(:repository_storage_move) { create(:project_repository_storage_move, :scheduled, container: project, destination_storage_name: 'test_second_storage') }
it 'updates the database' do
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('default').and_call_original
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('test_second_storage').and_return(SecureRandom.uuid)
result = subject.execute
project.reload
expect(result).to be_success
expect(project).not_to be_repository_read_only
expect(project.repository_storage).to eq('test_second_storage')
expect(project.project_repository.shard_name).to eq('test_second_storage')
end
end
context 'with wiki repository' do
include_examples 'moves repository to another storage', 'wiki' do
let(:project) { create(:project, :repository, wiki_enabled: true) }
let(:repository) { project.wiki.repository }
let(:destination) { 'test_second_storage' }
let(:repository_storage_move) { create(:project_repository_storage_move, :scheduled, container: project, destination_storage_name: destination) }
before do
project.create_wiki
end
end
end
context 'with design repository' do
include_examples 'moves repository to another storage', 'design' do
let(:project) { create(:project, :repository) }
let(:repository) { project.design_repository }
let(:destination) { 'test_second_storage' }
let(:repository_storage_move) { create(:project_repository_storage_move, :scheduled, container: project, destination_storage_name: destination) }
before do
project.design_repository.create_if_not_exists
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class FetchStatisticsIncrementService
attr_reader :project
def initialize(project)
@project = project
end
def execute
increment_fetch_count_sql = <<~SQL
INSERT INTO #{table_name} (project_id, date, fetch_count)
VALUES (#{project.id}, '#{Date.today}', 1)
ON CONFLICT (project_id, date) DO UPDATE SET fetch_count = #{table_name}.fetch_count + 1
SQL
ProjectDailyStatistic.connection.execute(increment_fetch_count_sql)
end
private
def table_name
ProjectDailyStatistic.table_name
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
module Projects
RSpec.describe FetchStatisticsIncrementService, feature_category: :groups_and_projects do
let(:project) { create(:project) }
describe '#execute' do
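# The service performs a single upsert (INSERT ... ON CONFLICT), so both branches
# are covered: a fresh row for today and an increment of an existing row.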
subject { described_class.new(project).execute }
it 'creates a new record for today with count == 1' do
expect { subject }.to change { ProjectDailyStatistic.count }.by(1)
created_stat = ProjectDailyStatistic.last
expect(created_stat.fetch_count).to eq(1)
expect(created_stat.project).to eq(project)
expect(created_stat.date).to eq(Date.today)
end
it "doesn't increment previous days statistics" do
yesterday_stat = create(:project_daily_statistic, fetch_count: 5, project: project, date: 1.day.ago)
expect { subject }.not_to change { yesterday_stat.reload.fetch_count }
end
context 'when the record already exists for today' do
let!(:project_daily_stat) { create(:project_daily_statistic, fetch_count: 5, project: project, date: Date.today) }
it 'increments the today record count by 1' do
expect { subject }.to change { project_daily_stat.reload.fetch_count }.to(6)
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class UpdatePagesService < BaseService
include Gitlab::Utils::StrongMemoize
# old deployment can be cached by pages daemon
# so we need to give pages daemon some time update cache
# 10 minutes is enough, but 30 feels safer
OLD_DEPLOYMENTS_DESTRUCTION_DELAY = 30.minutes
attr_reader :build, :deployment_update
def initialize(project, build)
@project = project
@build = build
@deployment_update = ::Gitlab::Pages::DeploymentUpdate.new(project, build)
end
def execute
register_attempt
::Ci::Pipelines::AddJobService.new(@build.pipeline).execute!(commit_status) do |job|
job.enqueue!
job.run!
end
return error(deployment_update.errors.first.full_message) unless deployment_update.valid?
build.artifacts_file.use_file do |artifacts_path|
deployment = create_pages_deployment(artifacts_path, build)
break error('The uploaded artifact size does not match the expected value') unless deployment
break error(deployment_update.errors.first.full_message) unless deployment_update.valid?
deactivate_old_deployments(deployment)
success
end
rescue StandardError => e
error(e.message)
raise e
end
private
def success
commit_status.success
publish_deployed_event
super
end
def error(message)
register_failure
log_error("Projects::UpdatePagesService: #{message}")
commit_status.allow_failure = !deployment_update.latest?
commit_status.description = message
commit_status.drop(:script_failure)
super
end
# Create status notifying the deployment of pages
def commit_status
GenericCommitStatus.new(
user: build.user,
ci_stage: stage,
name: 'pages:deploy',
stage: 'deploy',
stage_idx: stage.position
)
end
strong_memoize_attr :commit_status
# rubocop: disable Performance/ActiveRecordSubtransactionMethods
def stage
build.pipeline.stages.safe_find_or_create_by(name: 'deploy', pipeline_id: build.pipeline.id) do |stage|
stage.position = GenericCommitStatus::EXTERNAL_STAGE_IDX
stage.project = build.project
end
end
strong_memoize_attr :stage
# rubocop: enable Performance/ActiveRecordSubtransactionMethods
def create_pages_deployment(artifacts_path, build)
File.open(artifacts_path) do |file|
attributes = pages_deployment_attributes(file, build)
deployment = project.pages_deployments.build(**attributes)
break if deployment.file.size != file.size
deployment.tap(&:save!)
end
end
# overridden on EE
def pages_deployment_attributes(file, build)
{
file: file,
file_count: deployment_update.entries_count,
file_sha256: build.job_artifacts_archive.file_sha256,
ci_build_id: build.id,
root_directory: build.options[:publish],
upload_ready: false
}
end
def deactivate_old_deployments(deployment)
PagesDeployment.deactivate_deployments_older_than(
deployment,
time: OLD_DEPLOYMENTS_DESTRUCTION_DELAY.from_now)
end
def register_attempt
pages_deployments_total_counter.increment
end
def register_failure
pages_deployments_failed_total_counter.increment
end
def pages_deployments_total_counter
Gitlab::Metrics.counter(:pages_deployments_total, "Counter of GitLab Pages deployments triggered")
end
strong_memoize_attr :pages_deployments_total_counter
def pages_deployments_failed_total_counter
Gitlab::Metrics.counter(:pages_deployments_failed_total, "Counter of GitLab Pages deployments which failed")
end
strong_memoize_attr :pages_deployments_failed_total_counter
def publish_deployed_event
event = ::Pages::PageDeployedEvent.new(data: {
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id
})
Gitlab::EventStore.publish(event)
end
end
end
::Projects::UpdatePagesService.prepend_mod
```
|
# frozen_string_literal: true
require "spec_helper"
RSpec.describe Projects::UpdatePagesService, feature_category: :pages do
let_it_be(:project, refind: true) { create(:project, :repository) }
let_it_be(:old_pipeline) { create(:ci_pipeline, project: project, sha: project.commit('HEAD').sha) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project, sha: project.commit('HEAD').sha) }
let(:options) { {} }
let(:build) { create(:ci_build, pipeline: pipeline, ref: 'HEAD', options: options) }
let(:invalid_file) { fixture_file_upload('spec/fixtures/dk.png') }
let(:file) { fixture_file_upload("spec/fixtures/pages.zip") }
let(:custom_root_file) { fixture_file_upload("spec/fixtures/pages_with_custom_root.zip") }
let(:empty_file) { fixture_file_upload("spec/fixtures/pages_empty.zip") }
let(:empty_metadata_filename) { "spec/fixtures/pages_empty.zip.meta" }
let(:metadata_filename) { "spec/fixtures/pages.zip.meta" }
let(:custom_root_file_metadata) { "spec/fixtures/pages_with_custom_root.zip.meta" }
let(:metadata) { fixture_file_upload(metadata_filename) if File.exist?(metadata_filename) }
subject(:service) { described_class.new(project, build) }
RSpec.shared_examples 'old deployments' do
it 'deactivates old deployments from the same project with the same path prefix', :freeze_time do
other_project = create(:pages_deployment)
same_project_other_path_prefix = create(:pages_deployment, project: project, path_prefix: 'other')
same_project = create(:pages_deployment, project: project)
expect { expect(service.execute[:status]).to eq(:success) }
.to not_change { other_project.reload.deleted_at }
.and not_change { same_project_other_path_prefix.reload.deleted_at }
.and change { same_project.reload.deleted_at }
.from(nil).to(described_class::OLD_DEPLOYMENTS_DESTRUCTION_DELAY.from_now)
end
end
RSpec.shared_examples 'pages size limit is' do |size_limit|
context "when size is below the limit" do
before do
allow(metadata).to receive(:total_size).and_return(size_limit - 1.megabyte)
allow(metadata).to receive(:entries).and_return([])
end
it 'updates pages correctly' do
subject.execute
deploy_status = GenericCommitStatus.last
expect(deploy_status.description).not_to be_present
expect(project.pages_deployed?).to eq(true)
end
it_behaves_like 'old deployments'
end
context "when size is above the limit" do
before do
allow(metadata).to receive(:total_size).and_return(size_limit + 1.megabyte)
allow(metadata).to receive(:entries).and_return([])
end
it 'limits the maximum size of gitlab pages' do
subject.execute
deploy_status = GenericCommitStatus.last
expect(deploy_status.description).to match(/artifacts for pages are too large/)
expect(deploy_status).to be_script_failure
end
end
end
context 'when a deploy stage already exists', :aggregate_failures do
let!(:stage) { create(:ci_stage, name: 'deploy', pipeline: pipeline) }
it 'assigns the deploy stage' do
expect { service.execute }
.to change(GenericCommitStatus, :count).by(1)
.and change(Ci::Stage.where(name: 'deploy'), :count).by(0)
status = GenericCommitStatus.last
expect(status.ci_stage).to eq(stage)
expect(status.ci_stage.name).to eq('deploy')
expect(status.stage_name).to eq('deploy')
expect(status.stage).to eq('deploy')
end
end
context 'when a deploy stage does not exist' do
it 'assigns the deploy stage' do
expect { service.execute }
.to change(GenericCommitStatus, :count).by(1)
.and change(Ci::Stage.where(name: 'deploy'), :count).by(1)
status = GenericCommitStatus.last
expect(status.ci_stage.name).to eq('deploy')
expect(status.stage_name).to eq('deploy')
expect(status.stage).to eq('deploy')
end
end
context 'for new artifacts' do
context "for a valid job" do
let!(:artifacts_archive) { create(:ci_job_artifact, :correct_checksum, file: file, job: build) }
before do
create(:ci_job_artifact, file_type: :metadata, file_format: :gzip, file: metadata, job: build)
build.reload
end
it_behaves_like 'old deployments'
it "doesn't delete artifacts after deploying" do
expect(service.execute[:status]).to eq(:success)
expect(project.pages_deployed?).to eq(true)
expect(build.artifacts?).to eq(true)
end
it 'succeeds' do
expect { expect(service.execute[:status]).to eq(:success) }
.to change { project.pages_deployed? }
.from(false).to(true)
end
it 'publishes a PageDeployedEvent event with project id and namespace id' do
expected_data = {
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id
}
expect { service.execute }.to publish_event(Pages::PageDeployedEvent).with(expected_data)
end
it 'creates pages_deployment' do
expect { expect(service.execute[:status]).to eq(:success) }
.to change { project.pages_deployments.count }
.by(1)
deployment = project.pages_deployments.last
expect(deployment.size).to eq(file.size)
expect(deployment.file).to be_present
expect(deployment.file_count).to eq(3)
expect(deployment.file_sha256).to eq(artifacts_archive.file_sha256)
expect(deployment.ci_build_id).to eq(build.id)
expect(deployment.root_directory).to be_nil
end
it 'does not fail if pages_metadata is absent' do
project.pages_metadatum.destroy!
project.reload
expect { expect(service.execute[:status]).to eq(:success) }
.to change { project.pages_deployments.count }
.by(1)
end
context 'when archive does not have pages directory' do
let(:file) { empty_file }
let(:metadata_filename) { empty_metadata_filename }
it 'returns an error' do
expect(service.execute[:status]).not_to eq(:success)
expect(GenericCommitStatus.last.description)
.to eq(
"Error: You need to either include a `public/` folder in your artifacts, " \
"or specify which one to use for Pages using `publish` in `.gitlab-ci.yml`")
end
end
context 'when there is a custom root config' do
let(:file) { custom_root_file }
let(:metadata_filename) { custom_root_file_metadata }
context 'when the directory specified with `publish` is included in the artifacts' do
let(:options) { { publish: 'foo' } }
it 'creates pages_deployment and saves it in the metadata' do
expect(service.execute[:status]).to eq(:success)
deployment = project.pages_deployments.last
expect(deployment.root_directory).to eq(options[:publish])
end
end
context 'when the directory specified with `publish` is not included in the artifacts' do
let(:options) { { publish: 'bar' } }
it 'returns an error' do
expect(service.execute[:status]).not_to eq(:success)
expect(GenericCommitStatus.last.description)
.to eq(
"Error: You need to either include a `public/` folder in your artifacts, " \
"or specify which one to use for Pages using `publish` in `.gitlab-ci.yml`")
end
end
context 'when there is a folder named `public`, but `publish` specifies a different one' do
let(:options) { { publish: 'foo' } }
let(:file) { fixture_file_upload("spec/fixtures/pages.zip") }
let(:metadata_filename) { "spec/fixtures/pages.zip.meta" }
it 'returns an error' do
expect(service.execute[:status]).not_to eq(:success)
expect(GenericCommitStatus.last.description)
.to eq(
"Error: You need to either include a `public/` folder in your artifacts, " \
"or specify which one to use for Pages using `publish` in `.gitlab-ci.yml`")
end
end
end
it 'limits pages size' do
stub_application_setting(max_pages_size: 1)
expect(service.execute[:status]).not_to eq(:success)
end
it 'limits pages file count' do
create(:plan_limits, :default_plan, pages_file_entries: 2)
expect(service.execute[:status]).not_to eq(:success)
expect(GenericCommitStatus.last.description)
.to eq("pages site contains 3 file entries, while limit is set to 2")
end
context 'when timeout happens by DNS error' do
before do
allow_next_instance_of(described_class) do |instance|
allow(instance).to receive(:create_pages_deployment).and_raise(SocketError)
end
end
it 'raises an error' do
expect { service.execute }.to raise_error(SocketError)
build.reload
deploy_status = GenericCommitStatus.last
expect(deploy_status).to be_failed
expect(project.pages_deployed?).to eq(false)
end
end
context 'when missing artifacts metadata' do
before do
allow(build).to receive(:artifacts_metadata?).and_return(false)
end
it 'does not raise an error as failed job' do
service.execute
build.reload
deploy_status = GenericCommitStatus.last
expect(deploy_status).to be_failed
expect(project.pages_deployed?).to eq(false)
end
end
context 'with background jobs running', :sidekiq_inline do
it 'succeeds' do
expect(project.pages_deployed?).to be_falsey
expect(service.execute[:status]).to eq(:success)
end
end
context "when sha on branch was updated before deployment was uploaded" do
before do
expect(subject).to receive(:create_pages_deployment).and_wrap_original do |m, *args|
build.update!(ref: 'feature')
m.call(*args)
end
end
it 'creates a new pages deployment' do
expect { expect(service.execute[:status]).to eq(:success) }
.to change { project.pages_deployments.count }.by(1)
deployment = project.pages_deployments.last
expect(deployment.ci_build_id).to eq(build.id)
end
it_behaves_like 'old deployments'
context 'when newer deployment present' do
it 'fails with outdated reference message' do
new_pipeline = create(:ci_pipeline, project: project, sha: project.commit('HEAD').sha)
new_build = create(:ci_build, name: 'pages', pipeline: new_pipeline, ref: 'HEAD')
create(:pages_deployment, project: project, ci_build: new_build)
expect(service.execute[:status]).to eq(:error)
deploy_status = GenericCommitStatus.last
expect(deploy_status).to be_failed
expect(deploy_status.description).to eq('build SHA is outdated for this ref')
end
end
end
it 'fails when uploaded deployment size is wrong' do
allow_next_instance_of(PagesDeployment) do |deployment|
allow(deployment)
.to receive(:file)
.and_return(instance_double(Pages::DeploymentUploader, size: file.size + 1))
end
expect(service.execute[:status]).not_to eq(:success)
expect(GenericCommitStatus.last.description)
.to eq('The uploaded artifact size does not match the expected value')
end
end
end
# this situation should never happen in real life because all new archives have sha256
# and we only use new archives
# this test is here just to clarify that this behavior is intentional
context 'when artifacts archive does not have sha256' do
let!(:artifacts_archive) { create(:ci_job_artifact, file: file, job: build) }
before do
create(:ci_job_artifact, file_type: :metadata, file_format: :gzip, file: metadata, job: build)
build.reload
end
it 'fails with exception raised' do
expect { service.execute }
.to raise_error("Validation failed: File sha256 can't be blank")
end
end
it 'fails if no artifacts' do
expect(service.execute[:status]).not_to eq(:success)
end
it 'fails for invalid archive' do
create(:ci_job_artifact, :archive, file: invalid_file, job: build)
expect(service.execute[:status]).not_to eq(:success)
end
describe 'maximum pages artifacts size' do
let(:metadata) { spy('metadata') }
before do
file = fixture_file_upload('spec/fixtures/pages.zip')
metafile = fixture_file_upload('spec/fixtures/pages.zip.meta')
create(:ci_job_artifact, :archive, :correct_checksum, file: file, job: build)
create(:ci_job_artifact, :metadata, file: metafile, job: build)
allow(build).to receive(:artifacts_metadata_entry)
.and_return(metadata)
end
context 'when maximum pages size is set to zero' do
before do
stub_application_setting(max_pages_size: 0)
end
it_behaves_like 'pages size limit is', ::Gitlab::Pages::MAX_SIZE
end
context 'when size is limited on the instance level' do
before do
stub_application_setting(max_pages_size: 100)
end
it_behaves_like 'pages size limit is', 100.megabytes
end
end
context 'when retrying the job' do
let(:stage) { create(:ci_stage, position: 1_000_000, name: 'deploy', pipeline: pipeline) }
let!(:older_deploy_job) do
create(
:generic_commit_status,
:failed,
pipeline: pipeline,
ref: build.ref,
ci_stage: stage,
name: 'pages:deploy'
)
end
before do
create(:ci_job_artifact, :correct_checksum, file: file, job: build)
create(:ci_job_artifact, file_type: :metadata, file_format: :gzip, file: metadata, job: build)
build.reload
end
it 'marks older pages:deploy jobs retried' do
expect(service.execute[:status]).to eq(:success)
expect(older_deploy_job.reload).to be_retried
deploy_status = GenericCommitStatus.last
expect(deploy_status.ci_stage).to eq(stage)
expect(deploy_status.stage_idx).to eq(stage.position)
end
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
# NOTE: This service cannot be used directly because it is part of
# a bigger process. Instead, use the service MoveAccessService which moves
# project memberships, project group links, authorizations and refreshes
# the authorizations if necessary
module Projects
class MoveProjectMembersService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
Project.transaction do
move_project_members
remove_remaining_members if remove_remaining_elements
success
end
end
private
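# Move memberships the target project is missing by re-pointing their source at @project.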
def move_project_members
non_existent_members.update_all(source_id: @project.id)
end
def remove_remaining_members
# Remove remaining members and authorizations from source_project
source_project.project_members.destroy_all # rubocop: disable Cop/DestroyAll
end
def project_members_in_target_project
@project.project_members.select(:user_id)
end
# Look for members in source_project that are not in the target project
# rubocop: disable CodeReuse/ActiveRecord
def non_existent_members
source_project.members
.select(:id)
.where.not(user_id: @project.project_members.select(:user_id))
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveProjectMembersService, feature_category: :groups_and_projects do
let!(:user) { create(:user) }
let(:project_with_users) { create(:project, namespace: user.namespace) }
let(:target_project) { create(:project, namespace: user.namespace) }
let(:maintainer_user) { create(:user) }
let(:reporter_user) { create(:user) }
let(:developer_user) { create(:user) }
subject { described_class.new(target_project, user) }
describe '#execute' do
before do
project_with_users.add_maintainer(maintainer_user)
project_with_users.add_developer(developer_user)
project_with_users.add_reporter(reporter_user)
end
it 'moves the members from one project to another' do
expect(project_with_users.project_members.count).to eq 4
expect(target_project.project_members.count).to eq 1
subject.execute(project_with_users)
expect(project_with_users.project_members.count).to eq 0
expect(target_project.project_members.count).to eq 4
end
it 'does not move existent members to the current project' do
target_project.add_maintainer(developer_user)
target_project.add_developer(reporter_user)
expect(project_with_users.project_members.count).to eq 4
expect(target_project.project_members.count).to eq 3
subject.execute(project_with_users)
expect(project_with_users.project_members.count).to eq 0
expect(target_project.project_members.count).to eq 4
end
it 'rollbacks changes if transaction fails' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_users) }.to raise_error(StandardError)
expect(project_with_users.project_members.count).to eq 4
expect(target_project.project_members.count).to eq 1
end
context 'when remove_remaining_elements is false' do
let(:options) { { remove_remaining_elements: false } }
it 'does not remove remaining project members' do
target_project.add_maintainer(developer_user)
target_project.add_developer(reporter_user)
subject.execute(project_with_users, **options)
expect(project_with_users.project_members.count).not_to eq 0
end
end
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
# Service class for getting and caching the number of merge requests of several projects
# Warning: do not use this service with a really large set of projects
# because the service uses maps to retrieve the project ids
module Projects
class BatchOpenMergeRequestsCountService < Projects::BatchCountService
# rubocop: disable CodeReuse/ActiveRecord
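# Returns a hash of project_id => open merge request count for all given projects in one grouped query.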
def global_count
@global_count ||= count_service.query(project_ids).group(:project_id).count
end
# rubocop: enable CodeReuse/ActiveRecord
def count_service
::Projects::OpenMergeRequestsCountService
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::BatchOpenMergeRequestsCountService, feature_category: :code_review_workflow do
subject { described_class.new([project_1, project_2]) }
let_it_be(:project_1) { create(:project) }
let_it_be(:project_2) { create(:project) }
describe '#refresh_cache_and_retrieve_data', :use_clean_rails_memory_store_caching do
before do
create(:merge_request, source_project: project_1, target_project: project_1)
create(:merge_request, source_project: project_2, target_project: project_2)
end
it 'refreshes cache keys correctly when cache is clean', :aggregate_failures do
subject.refresh_cache_and_retrieve_data
expect(Rails.cache.read(get_cache_key(subject, project_1))).to eq(1)
expect(Rails.cache.read(get_cache_key(subject, project_2))).to eq(1)
expect { subject.refresh_cache_and_retrieve_data }.not_to exceed_query_limit(0)
end
end
def get_cache_key(subject, project)
subject.count_service
.new(project)
.cache_key
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class SlackApplicationInstallService < BaseService
include Gitlab::Routing
# Endpoint to initiate the OAuth flow, redirects to Slack's authorization screen
# https://api.slack.com/authentication/oauth-v2#asking
SLACK_AUTHORIZE_URL = 'https://slack.com/oauth/v2/authorize'
# Endpoint to exchange the temporary authorization code for an access token
# https://api.slack.com/authentication/oauth-v2#exchanging
SLACK_EXCHANGE_TOKEN_URL = 'https://slack.com/api/oauth.v2.access'
def execute
slack_data = exchange_slack_token
return error("Slack: #{slack_data['error']}") unless slack_data['ok']
integration = project.gitlab_slack_application_integration \
|| project.create_gitlab_slack_application_integration!
installation = integration.slack_integration || integration.build_slack_integration
installation.update!(
bot_user_id: slack_data['bot_user_id'],
bot_access_token: slack_data['access_token'],
team_id: slack_data.dig('team', 'id'),
team_name: slack_data.dig('team', 'name'),
alias: project.full_path,
user_id: slack_data.dig('authed_user', 'id'),
authorized_scope_names: slack_data['scope']
)
update_legacy_installations!(installation)
success
end
private
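# Exchanges the temporary authorization code for an access token payload,
# as described in the Slack OAuth v2 docs linked above.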
def exchange_slack_token
query = {
client_id: Gitlab::CurrentSettings.slack_app_id,
client_secret: Gitlab::CurrentSettings.slack_app_secret,
code: params[:code],
# NOTE: Needs to match the `redirect_uri` passed to the authorization endpoint,
# otherwise we get a `bad_redirect_uri` error.
redirect_uri: slack_auth_project_settings_slack_url(project)
}
Gitlab::HTTP.get(SLACK_EXCHANGE_TOKEN_URL, query: query).to_hash
end
# Update any legacy SlackIntegration records for the Slack Workspace. Legacy SlackIntegration records
# are any created before our Slack App was upgraded to use Granular Bot Permissions and issue a
# bot_access_token. Any SlackIntegration records for the Slack Workspace will already have the same
# bot_access_token.
def update_legacy_installations!(installation)
updatable_attributes = installation.attributes.slice(
'user_id',
'bot_user_id',
'encrypted_bot_access_token',
'encrypted_bot_access_token_iv',
'updated_at'
)
SlackIntegration.by_team(installation.team_id).id_not_in(installation.id).each_batch do |batch|
batch_ids = batch.pluck(:id) # rubocop: disable CodeReuse/ActiveRecord
batch.update_all(updatable_attributes)
::Integrations::SlackWorkspace::IntegrationApiScope.update_scopes(batch_ids, installation.slack_api_scopes)
end
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::SlackApplicationInstallService, feature_category: :integrations do
let_it_be(:user) { create(:user) }
let_it_be_with_refind(:project) { create(:project) }
let(:integration) { project.gitlab_slack_application_integration }
let(:installation) { integration.slack_integration }
let(:slack_app_id) { 'A12345' }
let(:slack_app_secret) { 'secret' }
let(:oauth_code) { 'code' }
let(:params) { { code: oauth_code } }
let(:exchange_url) { described_class::SLACK_EXCHANGE_TOKEN_URL }
let(:redirect_url) { Gitlab::Routing.url_helpers.slack_auth_project_settings_slack_url(project) }
subject(:service) { described_class.new(project, user, params) }
before do
stub_application_setting(slack_app_id: slack_app_id, slack_app_secret: slack_app_secret)
query = {
client_id: slack_app_id,
client_secret: slack_app_secret,
code: oauth_code,
redirect_uri: redirect_url
}
stub_request(:get, exchange_url)
.with(query: query)
.to_return(body: response.to_json, headers: { 'Content-Type' => 'application/json' })
end
context 'when Slack responds with an error' do
let(:response) do
{
ok: false,
error: 'something is wrong'
}
end
it 'returns error result' do
result = service.execute
expect(result).to eq(message: 'Slack: something is wrong', status: :error)
end
end
context 'when Slack responds with an access token' do
let_it_be(:team_id) { 'T11111' }
let_it_be(:team_name) { 'Team name' }
let_it_be(:user_id) { 'U11111' }
let_it_be(:bot_user_id) { 'U99999' }
let_it_be(:bot_access_token) { 'token-XXXXX' }
let(:response) do
{
ok: true,
app_id: 'A12345',
authed_user: { id: user_id },
token_type: 'bot',
access_token: bot_access_token,
bot_user_id: bot_user_id,
team: { id: team_id, name: 'Team name' },
enterprise: { is_enterprise_install: false },
scope: 'chat:a,chat:b,chat:c'
}
end
shared_examples 'success response' do
it 'returns success result and creates all needed records' do
result = service.execute
expect(result).to eq(status: :success)
expect(integration).to be_present
expect(installation).to be_present
expect(installation).to have_attributes(
integration_id: integration.id,
team_id: team_id,
team_name: team_name,
alias: project.full_path,
user_id: user_id,
bot_user_id: bot_user_id,
bot_access_token: bot_access_token,
authorized_scope_names: contain_exactly('chat:a', 'chat:b', 'chat:c')
)
end
end
it_behaves_like 'success response'
context 'when integration record already exists' do
before do
project.create_gitlab_slack_application_integration!
end
it_behaves_like 'success response'
context 'when installation record already exists' do
before do
integration.create_slack_integration!(
team_id: 'old value',
team_name: 'old value',
alias: 'old value',
user_id: 'old value',
bot_user_id: 'old value',
bot_access_token: 'old value'
)
end
it_behaves_like 'success response'
end
end
context 'when the team has other Slack installation records' do
let_it_be_with_reload(:other_installation) { create(:slack_integration, team_id: team_id) }
let_it_be_with_reload(:other_legacy_installation) { create(:slack_integration, :legacy, team_id: team_id) }
let_it_be_with_reload(:legacy_installation_for_other_team) { create(:slack_integration, :legacy) }
it_behaves_like 'success response'
it 'updates related legacy records' do
travel_to(1.minute.from_now) do
expected_attributes = {
'user_id' => user_id,
'bot_user_id' => bot_user_id,
'bot_access_token' => bot_access_token,
'updated_at' => Time.current,
'authorized_scope_names' => %w[chat:a chat:b chat:c]
}
service.execute
expect(other_installation).to have_attributes(expected_attributes)
expect(other_legacy_installation).to have_attributes(expected_attributes)
expect(legacy_installation_for_other_team).not_to have_attributes(expected_attributes)
end
end
end
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class for performing operations that should take place after a
# project has been renamed.
#
# Example usage:
#
# project = Project.find(42)
#
# project.update(...)
#
# Projects::AfterRenameService.new(project).execute
class AfterRenameService
include BaseServiceUtility
# @return [Project] The Project being renamed.
attr_reader :project
# @return [String] The path slug the project was using, before the rename took place.
attr_reader :path_before
# @return [String] The full path of the namespace + project, before the rename took place.
attr_reader :full_path_before
# @return [String] The full path of the namespace + project, after the rename took place.
attr_reader :full_path_after
RenameFailedError = Class.new(StandardError)
# @param [Project] project The Project being renamed.
# @param [String] path_before The path slug the project was using, before the rename took place.
def initialize(project, path_before:, full_path_before:)
@project = project
@path_before = path_before
@full_path_before = full_path_before
@full_path_after = project.full_path
end
def execute
rename_base_repository_in_registry!
expire_caches_before_rename
rename_or_migrate_repository!
send_move_instructions
execute_system_hooks
update_repository_configuration
rename_transferred_documents
log_completion
publish_event
end
def rename_base_repository_in_registry!
return unless project.has_container_registry_tags?
ensure_registry_tags_can_be_handled
result = ContainerRegistry::GitlabApiClient.rename_base_repository_path(
full_path_before, name: project_path)
return if result == :ok
rename_failed!("Renaming the base repository in the registry failed with error #{result}.")
end
def ensure_registry_tags_can_be_handled
return if Feature.enabled?(:renaming_project_with_tags, project) &&
ContainerRegistry::GitlabApiClient.supports_gitlab_api?
rename_failed!("Project #{full_path_before} cannot be renamed because images are " \
"present in its container registry")
end
def expire_caches_before_rename
project.expire_caches_before_rename(full_path_before)
end
def rename_or_migrate_repository!
success =
::Projects::HashedStorage::MigrationService
.new(project, full_path_before)
.execute
return if success
rename_failed!("Repository #{full_path_before} could not be renamed to #{full_path_after}")
end
def send_move_instructions
return unless send_move_instructions?
project.send_move_instructions(full_path_before)
end
def execute_system_hooks
project.old_path_with_namespace = full_path_before
system_hook_service.execute_hooks_for(project, :rename)
end
def update_repository_configuration
project.reload_repository!
project.set_full_path
project.track_project_repository
end
def rename_transferred_documents
if rename_uploads?
Gitlab::UploadsTransfer
.new
.rename_project(path_before, project_path, namespace_full_path)
end
end
def log_completion
log_info(
"Project #{project.id} has been renamed from " \
"#{full_path_before} to #{full_path_after}"
)
end
def send_move_instructions?
!project.import_started?
end
def rename_uploads?
!project.hashed_storage?(:attachments)
end
def project_path
project.path
end
def namespace_full_path
project.namespace.full_path
end
def rename_failed!(error)
log_error(error)
raise RenameFailedError, error
end
def publish_event
event = Projects::ProjectPathChangedEvent.new(data: {
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id,
old_path: full_path_before,
new_path: full_path_after
})
Gitlab::EventStore.publish(event)
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::AfterRenameService, feature_category: :groups_and_projects do
let(:legacy_storage) { Storage::LegacyProject.new(project) }
let(:hashed_storage) { Storage::Hashed.new(project) }
let!(:path_before_rename) { project.path }
let!(:full_path_before_rename) { project.full_path }
let!(:path_after_rename) { "#{project.path}-renamed" }
let!(:full_path_after_rename) { "#{project.full_path}-renamed" }
let!(:repo_before_rename) { project.repository.raw }
let!(:wiki_repo_before_rename) { project.wiki.repository.raw }
let(:repo_after_rename) do
Gitlab::Git::Repository.new(project.repository_storage, "#{full_path_after_rename}.git", nil, nil)
end
let(:wiki_repo_after_rename) do
Gitlab::Git::Repository.new(project.repository_storage, "#{full_path_after_rename}.wiki.git", nil, nil)
end
describe '#execute' do
let(:project) { create(:project, :repository, skip_disk_validation: true) }
let(:gitlab_shell) { Gitlab::Shell.new }
let(:hash) { Digest::SHA2.hexdigest(project.id.to_s) }
let(:hashed_prefix) { File.join('@hashed', hash[0..1], hash[2..3]) }
let(:hashed_path) { File.join(hashed_prefix, hash) }
let(:message) { "Repository #{full_path_before_rename} could not be renamed to #{full_path_after_rename}" }
before do
# Project#gitlab_shell returns a new instance of Gitlab::Shell on every
# call. This makes testing a bit easier.
allow(project).to receive(:gitlab_shell).and_return(gitlab_shell)
stub_application_setting(hashed_storage_enabled: true)
end
shared_examples 'logging and raising a RenameFailedError' do
it 'logs and raises a RenameFailedError' do
expect_any_instance_of(described_class).to receive(:log_error).with(message)
expect { service_execute }
.to raise_error(described_class::RenameFailedError)
end
end
it 'renames a repository' do
stub_container_registry_config(enabled: false)
expect_any_instance_of(SystemHooksService)
.to receive(:execute_hooks_for)
.with(project, :rename)
expect(project).to receive(:expire_caches_before_rename)
service_execute
end
context 'when renaming or migrating fails' do
before do
allow_any_instance_of(::Projects::HashedStorage::MigrationService)
.to receive(:execute).and_return(false)
end
it_behaves_like 'logging and raising a RenameFailedError'
end
context 'container registry with images' do
let(:container_repository) { create(:container_repository) }
let(:message) do
"Project #{full_path_before_rename} cannot be renamed because images are " \
"present in its container registry"
end
before do
stub_container_registry_config(enabled: true)
stub_container_registry_tags(repository: :any, tags: ['tag'])
project.container_repositories << container_repository
end
context 'when feature renaming_project_with_tags is disabled' do
before do
stub_feature_flags(renaming_project_with_tags: false)
end
it_behaves_like 'logging and raising a RenameFailedError'
end
context 'when Gitlab API is not supported' do
before do
allow(ContainerRegistry::GitlabApiClient).to receive(:supports_gitlab_api?).and_return(false)
end
it_behaves_like 'logging and raising a RenameFailedError'
end
context 'when Gitlab API Client is supported' do
before do
allow(ContainerRegistry::GitlabApiClient).to receive(:supports_gitlab_api?).and_return(true)
end
it 'renames the base repository in the registry' do
expect(ContainerRegistry::GitlabApiClient).to receive(:rename_base_repository_path)
.with(full_path_before_rename, name: path_after_rename).and_return(:ok)
service_execute
end
context 'when the base repository rename in the registry fails' do
before do
allow(ContainerRegistry::GitlabApiClient)
.to receive(:rename_base_repository_path).and_return(:bad_request)
end
let(:message) { 'Renaming the base repository in the registry failed with error bad_request.' }
it_behaves_like 'logging and raising a RenameFailedError'
end
end
end
context 'attachments' do
let(:uploader) { create(:upload, :issuable_upload, :with_file, model: project) }
let(:file_uploader) { build(:file_uploader, project: project) }
let(:legacy_storage_path) { File.join(file_uploader.root, legacy_storage.disk_path) }
let(:hashed_storage_path) { File.join(file_uploader.root, hashed_storage.disk_path) }
it 'keeps uploads folder location unchanged' do
expect_any_instance_of(Gitlab::UploadsTransfer).not_to receive(:rename_project)
service_execute
end
context 'when not rolled out' do
let(:project) { create(:project, :repository, storage_version: 1, skip_disk_validation: true) }
it 'moves attachments folder to hashed storage' do
expect(File.directory?(legacy_storage_path)).to be_truthy
expect(File.directory?(hashed_storage_path)).to be_falsey
service_execute
expect(project.reload.hashed_storage?(:attachments)).to be_truthy
expect(File.directory?(legacy_storage_path)).to be_falsey
expect(File.directory?(hashed_storage_path)).to be_truthy
end
end
end
it 'updates project full path in gitaly' do
service_execute
expect(project.repository.full_path).to eq(project.full_path)
end
it 'updates storage location' do
service_execute
expect(project.project_repository).to have_attributes(
disk_path: project.disk_path,
shard_name: project.repository_storage
)
end
context 'EventStore' do
let(:project) { create(:project, :repository, skip_disk_validation: true) }
it 'publishes a ProjectPathChangedEvent' do
expect { service_execute }
.to publish_event(Projects::ProjectPathChangedEvent)
.with(
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id,
old_path: full_path_before_rename,
new_path: full_path_after_rename
)
end
end
end
def service_execute
# AfterRenameService is called by UpdateService after a successful model.update
# the initialization will include before and after paths values
project.update!(path: path_after_rename)
described_class.new(project, path_before: path_before_rename, full_path_before: full_path_before_rename).execute
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class MoveForksService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super && source_project.fork_network
Project.transaction do
move_fork_network_members
update_root_project
refresh_forks_count
success
end
end
private
# rubocop: disable CodeReuse/ActiveRecord
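# Re-point fork network memberships, and the forks that point at the source project, to the target project.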
def move_fork_network_members
ForkNetworkMember.where(project: source_project).update_all(project_id: @project.id)
ForkNetworkMember.where(forked_from_project: source_project).update_all(forked_from_project_id: @project.id)
end
# rubocop: enable CodeReuse/ActiveRecord
# rubocop: disable CodeReuse/ActiveRecord
def update_root_project
# Update root network project
ForkNetwork.where(root_project: source_project).update_all(root_project_id: @project.id)
end
# rubocop: enable CodeReuse/ActiveRecord
def refresh_forks_count
Projects::ForksCountService.new(@project).refresh_cache
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveForksService, feature_category: :source_code_management do
include ProjectForksHelper
let!(:user) { create(:user) }
let!(:project_with_forks) { create(:project, namespace: user.namespace) }
let!(:target_project) { create(:project, namespace: user.namespace) }
let!(:lvl1_forked_project_1) { fork_project(project_with_forks, user) }
let!(:lvl1_forked_project_2) { fork_project(project_with_forks, user) }
let!(:lvl2_forked_project_1_1) { fork_project(lvl1_forked_project_1, user) }
let!(:lvl2_forked_project_1_2) { fork_project(lvl1_forked_project_1, user) }
subject { described_class.new(target_project, user) }
describe '#execute' do
context 'when moving a root forked project' do
it 'moves the descendant forks' do
expect(project_with_forks.forks.count).to eq 2
expect(target_project.forks.count).to eq 0
subject.execute(project_with_forks)
expect(project_with_forks.forks.count).to eq 0
expect(target_project.forks.count).to eq 2
expect(lvl1_forked_project_1.forked_from_project).to eq target_project
expect(lvl1_forked_project_1.fork_network_member.forked_from_project).to eq target_project
expect(lvl1_forked_project_2.forked_from_project).to eq target_project
expect(lvl1_forked_project_2.fork_network_member.forked_from_project).to eq target_project
end
it 'updates the fork network' do
expect(project_with_forks.fork_network.root_project).to eq project_with_forks
expect(project_with_forks.fork_network.fork_network_members.map(&:project)).to include project_with_forks
subject.execute(project_with_forks)
expect(target_project.reload.fork_network.root_project).to eq target_project
expect(target_project.fork_network.fork_network_members.map(&:project)).not_to include project_with_forks
end
end
context 'when moving an intermediate forked project' do
it 'moves the descendant forks' do
expect(lvl1_forked_project_1.forks.count).to eq 2
expect(target_project.forks.count).to eq 0
subject.execute(lvl1_forked_project_1)
expect(lvl1_forked_project_1.forks.count).to eq 0
expect(target_project.forks.count).to eq 2
expect(lvl2_forked_project_1_1.forked_from_project).to eq target_project
expect(lvl2_forked_project_1_1.fork_network_member.forked_from_project).to eq target_project
expect(lvl2_forked_project_1_2.forked_from_project).to eq target_project
expect(lvl2_forked_project_1_2.fork_network_member.forked_from_project).to eq target_project
end
it 'moves the ascendant fork' do
subject.execute(lvl1_forked_project_1)
expect(target_project.forked_from_project).to eq project_with_forks
expect(target_project.fork_network_member.forked_from_project).to eq project_with_forks
end
it 'does not update fork network' do
subject.execute(lvl1_forked_project_1)
expect(target_project.reload.fork_network.root_project).to eq project_with_forks
end
end
context 'when moving a leaf forked project' do
it 'moves the ascendant fork' do
subject.execute(lvl2_forked_project_1_1)
expect(target_project.forked_from_project).to eq lvl1_forked_project_1
expect(target_project.fork_network_member.forked_from_project).to eq lvl1_forked_project_1
end
it 'does not update fork network' do
subject.execute(lvl2_forked_project_1_1)
expect(target_project.reload.fork_network.root_project).to eq project_with_forks
end
end
it 'rollbacks changes if transaction fails' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_forks) }.to raise_error(StandardError)
expect(project_with_forks.forks.count).to eq 2
expect(target_project.forks.count).to eq 0
end
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class MoveUsersStarProjectsService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
user_stars = source_project.users_star_projects
return unless user_stars.any?
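# Re-point the stars at the target project and recalculate both cached star counters.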
Project.transaction do
user_stars.update_all(project_id: @project.id)
@project.update(star_count: @project.starrers.with_state(:active).size)
source_project.update(star_count: source_project.starrers.with_state(:active).size)
success
end
end
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveUsersStarProjectsService, feature_category: :groups_and_projects do
let!(:user) { create(:user) }
let!(:project_with_stars) { create(:project, namespace: user.namespace) }
let!(:target_project) { create(:project, namespace: user.namespace) }
subject { described_class.new(target_project, user) }
describe '#execute' do
before do
create_list(:users_star_project, 2, project: project_with_stars)
end
it 'moves the user\'s stars from one project to another' do
project_with_stars.reload
target_project.reload
expect(project_with_stars.users_star_projects.count).to eq 2
expect(project_with_stars.star_count).to eq 2
expect(target_project.users_star_projects.count).to eq 0
expect(target_project.star_count).to eq 0
subject.execute(project_with_stars)
project_with_stars.reload
target_project.reload
expect(project_with_stars.users_star_projects.count).to eq 0
expect(project_with_stars.star_count).to eq 0
expect(target_project.users_star_projects.count).to eq 2
expect(target_project.star_count).to eq 2
end
it 'rollbacks changes if transaction fails' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_stars) }.to raise_error(StandardError)
project_with_stars.reload
target_project.reload
expect(project_with_stars.users_star_projects.count).to eq 2
expect(project_with_stars.star_count).to eq 2
expect(target_project.users_star_projects.count).to eq 0
expect(target_project.star_count).to eq 0
end
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class AutocompleteService < BaseService
include LabelsAsHash
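# Open issues visible to the current user, limited to the iid and title needed for autocomplete.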
def issues
IssuesFinder.new(current_user, project_id: project.id, state: 'opened').execute.select([:iid, :title])
end
def milestones
finder_params = {
project_ids: [@project.id],
state: :active,
order: { due_date: :asc, title: :asc }
}
finder_params[:group_ids] = @project.group.self_and_ancestors.select(:id) if @project.group
MilestonesFinder.new(finder_params).execute.select([:iid, :title, :due_date])
end
def merge_requests
MergeRequestsFinder.new(current_user, project_id: project.id, state: 'opened').execute.select([:iid, :title])
end
def commands(noteable)
return [] unless noteable && current_user
QuickActions::InterpretService.new(project, current_user).available_commands(noteable)
end
def snippets
SnippetsFinder.new(current_user, project: project).execute.select([:id, :title])
end
def contacts(target)
available_contacts = Crm::ContactsFinder.new(current_user, group: project.group).execute
.select([:id, :email, :first_name, :last_name, :state])
contact_hashes = available_contacts.as_json
return contact_hashes unless target.is_a?(Issue)
ids = target.customer_relations_contacts.ids # rubocop:disable CodeReuse/ActiveRecord
contact_hashes.each do |hash|
hash[:set] = ids.include?(hash['id'])
end
contact_hashes
end
def labels_as_hash(target)
super(target, project_id: project.id, include_ancestor_groups: true)
end
end
end
Projects::AutocompleteService.prepend_mod_with('Projects::AutocompleteService')
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::AutocompleteService, feature_category: :groups_and_projects do
describe '#issues' do
describe 'confidential issues' do
let(:author) { create(:user) }
let(:assignee) { create(:user) }
let(:non_member) { create(:user) }
let(:member) { create(:user) }
let(:admin) { create(:admin) }
let(:project) { create(:project, :public) }
let!(:issue) { create(:issue, project: project, title: 'Issue 1') }
let!(:security_issue_1) { create(:issue, :confidential, project: project, title: 'Security issue 1', author: author) }
let!(:security_issue_2) { create(:issue, :confidential, title: 'Security issue 2', project: project, assignees: [assignee]) }
it 'does not list project confidential issues for guests' do
autocomplete = described_class.new(project, nil)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).not_to include security_issue_1.iid
expect(issues).not_to include security_issue_2.iid
expect(issues.count).to eq 1
end
it 'does not list project confidential issues for non project members' do
autocomplete = described_class.new(project, non_member)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).not_to include security_issue_1.iid
expect(issues).not_to include security_issue_2.iid
expect(issues.count).to eq 1
end
it 'does not list project confidential issues for project members with guest role' do
project.add_guest(member)
autocomplete = described_class.new(project, member)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).not_to include security_issue_1.iid
expect(issues).not_to include security_issue_2.iid
expect(issues.count).to eq 1
end
it 'lists project confidential issues for author' do
autocomplete = described_class.new(project, author)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).to include security_issue_1.iid
expect(issues).not_to include security_issue_2.iid
expect(issues.count).to eq 2
end
it 'lists project confidential issues for assignee' do
autocomplete = described_class.new(project, assignee)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).not_to include security_issue_1.iid
expect(issues).to include security_issue_2.iid
expect(issues.count).to eq 2
end
it 'lists project confidential issues for project members' do
project.add_developer(member)
autocomplete = described_class.new(project, member)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).to include security_issue_1.iid
expect(issues).to include security_issue_2.iid
expect(issues.count).to eq 3
end
context 'when admin mode is enabled', :enable_admin_mode do
it 'lists all project issues for admin' do
autocomplete = described_class.new(project, admin)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).to include security_issue_1.iid
expect(issues).to include security_issue_2.iid
expect(issues.count).to eq 3
end
end
context 'when admin mode is disabled' do
it 'does not list project confidential issues for admin' do
autocomplete = described_class.new(project, admin)
issues = autocomplete.issues.map(&:iid)
expect(issues).to include issue.iid
expect(issues).not_to include security_issue_1.iid
expect(issues).not_to include security_issue_2.iid
expect(issues.count).to eq 1
end
end
end
end
describe '#milestones' do
let(:user) { create(:user) }
let(:group) { create(:group) }
let(:project) { create(:project, group: group) }
let!(:group_milestone1) { create(:milestone, group: group, due_date: '2017-01-01', title: 'Second Title') }
let!(:group_milestone2) { create(:milestone, group: group, due_date: '2017-01-01', title: 'First Title') }
let!(:project_milestone) { create(:milestone, project: project, due_date: '2016-01-01') }
let(:milestone_titles) { described_class.new(project, user).milestones.map(&:title) }
it 'includes project and group milestones and sorts them correctly' do
expect(milestone_titles).to eq([project_milestone.title, group_milestone2.title, group_milestone1.title])
end
it 'does not include closed milestones' do
group_milestone1.close
expect(milestone_titles).to eq([project_milestone.title, group_milestone2.title])
end
it 'does not include milestones from other projects in the group' do
other_project = create(:project, group: group)
project_milestone.update!(project: other_project)
expect(milestone_titles).to eq([group_milestone2.title, group_milestone1.title])
end
context 'with nested groups' do
let(:subgroup) { create(:group, :public, parent: group) }
let!(:subgroup_milestone) { create(:milestone, group: subgroup) }
before do
project.update!(namespace: subgroup)
end
it 'includes project milestones and all ancestor milestones' do
expect(milestone_titles).to match_array(
[project_milestone.title, group_milestone2.title, group_milestone1.title, subgroup_milestone.title]
)
end
end
end
describe '#contacts' do
let_it_be(:user) { create(:user) }
let_it_be(:group) { create(:group, :crm_enabled) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:contact_1) { create(:contact, group: group) }
let_it_be(:contact_2) { create(:contact, group: group) }
let_it_be(:contact_3) { create(:contact, :inactive, group: group) }
let(:issue) { nil }
subject { described_class.new(project, user).contacts(issue).as_json }
before do
group.add_developer(user)
end
it 'returns CRM contacts from group' do
expected_contacts = [
{ 'id' => contact_1.id, 'email' => contact_1.email,
'first_name' => contact_1.first_name, 'last_name' => contact_1.last_name, 'state' => contact_1.state },
{ 'id' => contact_2.id, 'email' => contact_2.email,
'first_name' => contact_2.first_name, 'last_name' => contact_2.last_name, 'state' => contact_2.state },
{ 'id' => contact_3.id, 'email' => contact_3.email,
'first_name' => contact_3.first_name, 'last_name' => contact_3.last_name, 'state' => contact_3.state }
]
expect(subject).to match_array(expected_contacts)
end
context 'some contacts are already assigned to the issue' do
let(:issue) { create(:issue, project: project) }
before do
issue.customer_relations_contacts << [contact_2, contact_3]
end
it 'marks already assigned contacts as set' do
expected_contacts = [
{ 'id' => contact_1.id, 'email' => contact_1.email,
'first_name' => contact_1.first_name, 'last_name' => contact_1.last_name, 'state' => contact_1.state, 'set' => false },
{ 'id' => contact_2.id, 'email' => contact_2.email,
'first_name' => contact_2.first_name, 'last_name' => contact_2.last_name, 'state' => contact_2.state, 'set' => true },
{ 'id' => contact_3.id, 'email' => contact_3.email,
'first_name' => contact_3.first_name, 'last_name' => contact_3.last_name, 'state' => contact_3.state, 'set' => true }
]
expect(subject).to match_array(expected_contacts)
end
end
end
describe '#labels_as_hash' do
def expect_labels_to_equal(labels, expected_labels)
expect(labels.size).to eq(expected_labels.size)
extract_title = lambda { |label| label['title'] }
expect(labels.map(&extract_title)).to match_array(expected_labels.map(&extract_title))
end
let(:user) { create(:user) }
let(:group) { create(:group, :nested) }
let!(:sub_group) { create(:group, parent: group) }
let(:project) { create(:project, :public, group: group) }
let(:issue) { create(:issue, project: project) }
let!(:label1) { create(:label, project: project) }
let!(:label2) { create(:label, project: project) }
let!(:sub_group_label) { create(:group_label, group: sub_group) }
let!(:parent_group_label) { create(:group_label, group: group.parent, group_id: group.id) }
before do
create(:group_member, group: group, user: user)
end
it 'returns labels from project and ancestor groups' do
service = described_class.new(project, user)
results = service.labels_as_hash(nil)
expected_labels = [label1, label2, parent_group_label]
expect_labels_to_equal(results, expected_labels)
end
context 'some labels are already assigned' do
before do
issue.labels << label1
end
it 'marks already assigned as set' do
service = described_class.new(project, user)
results = service.labels_as_hash(issue)
expected_labels = [label1, label2, parent_group_label]
expect_labels_to_equal(results, expected_labels)
assigned_label_titles = issue.labels.map(&:title)
results.each do |hash|
if assigned_label_titles.include?(hash['title'])
expect(hash[:set]).to eq(true)
else
expect(hash.key?(:set)).to eq(false)
end
end
end
end
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
# Projects::BranchesByModeService uses Gitaly page-token pagination
# in order to optimally fetch branches.
# The drawback of the page-token pagination is that it doesn't provide
# an option of going to the previous page of the collection.
# That's why we need to fall back to offset pagination when previous page
# is requested.
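#
# Illustrative parameter flow (a sketch; branch names are placeholders):
#   first page:    { mode: 'all' }                             -> Gitaly keyset pagination
#   next page:     { page_token: '<last-branch>', offset: 1 }  -> Gitaly keyset pagination
#   previous page: { page: 1, offset: 0 }                      -> offset pagination fallback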
class Projects::BranchesByModeService
include Gitlab::Routing
attr_reader :project, :params
def initialize(project, params = {})
@project = project
@params = params
end
def execute
return fetch_branches_via_gitaly_pagination if use_gitaly_pagination?
fetch_branches_via_offset_pagination
end
private
def mode
params[:mode]
end
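# Filters the list down to 'active' or 'stale' branches; any other mode passes through unchanged.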
def by_mode(branches)
return branches unless %w[active stale].include?(mode)
branches.select { |b| b.state.to_s == mode }
end
def use_gitaly_pagination?
return false if params[:page].present? || params[:search].present?
Feature.enabled?(:branch_list_keyset_pagination, project)
end
def fetch_branches_via_offset_pagination
branches = BranchesFinder.new(project.repository, params).execute
branches = Kaminari.paginate_array(by_mode(branches)).page(params[:page])
branches_with_links(branches, last_page: branches.last_page?)
end
def fetch_branches_via_gitaly_pagination
per_page = Kaminari.config.default_per_page
options = params.merge(per_page: per_page + 1, page_token: params[:page_token])
branches = BranchesFinder.new(project.repository, options).execute(gitaly_pagination: true)
# Branch is stale if it hasn't been updated for 3 months
# This logic is specified in Gitlab Rails and isn't specified in Gitaly
# To display stale branches we fetch branches sorted as most-stale-at-the-top
# If the result contains active branches we filter them out and define that no more stale branches left
# Same logic applies to fetching active branches
branches = by_mode(branches)
last_page = branches.size <= per_page
branches = branches.take(per_page) # rubocop:disable CodeReuse/ActiveRecord
branches_with_links(branches, last_page: last_page)
end
def branches_with_links(branches, last_page:)
# To fall back to offset pagination we need to track current page via offset param
# And increase it whenever we go to the next page
previous_offset = params[:offset].to_i
previous_path = nil
next_path = nil
return [branches, previous_path, next_path] if branches.blank?
unless last_page
next_path = project_branches_filtered_path(project, state: mode, page_token: branches.last.name, sort: params[:sort], offset: previous_offset + 1)
end
if previous_offset > 0
previous_path = project_branches_filtered_path(project, state: mode, sort: params[:sort], page: previous_offset, offset: previous_offset - 1)
end
[branches, previous_path, next_path]
end
end
```
|
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::BranchesByModeService, feature_category: :source_code_management do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :repository) }
let(:finder) { described_class.new(project, params) }
let(:params) { { mode: 'all' } }
subject { finder.execute }
describe '#execute' do
context 'page is passed' do
let(:page) { (TestEnv::BRANCH_SHA.length.to_f / Kaminari.config.default_per_page).ceil }
let(:params) { { page: page, mode: 'all', offset: page - 1 } }
it 'uses offset pagination' do
expect(finder).to receive(:fetch_branches_via_offset_pagination).and_call_original
branches, prev_page, next_page = subject
remaining = TestEnv::BRANCH_SHA.length % Kaminari.config.default_per_page
expect(branches.size).to eq(remaining > 0 ? remaining : 20)
expect(next_page).to be_nil
expect(prev_page).to eq("/#{project.full_path}/-/branches/all?offset=#{page - 2}&page=#{page - 1}")
end
context 'but the page does not contain any branches' do
let(:params) { { page: 100, mode: 'all' } }
it 'uses offset pagination' do
expect(finder).to receive(:fetch_branches_via_offset_pagination).and_call_original
branches, prev_page, next_page = subject
expect(branches).to eq([])
expect(next_page).to be_nil
expect(prev_page).to be_nil
end
end
end
context 'search is passed' do
let(:params) { { search: 'feature' } }
it 'uses offset pagination' do
expect(finder).to receive(:fetch_branches_via_offset_pagination).and_call_original
branches, prev_page, next_page = subject
expect(branches.map(&:name)).to match_array(%w[feature feature_conflict])
expect(next_page).to be_nil
expect(prev_page).to be_nil
end
end
context 'branch_list_keyset_pagination is disabled' do
it 'uses offset pagination' do
stub_feature_flags(branch_list_keyset_pagination: false)
expect(finder).to receive(:fetch_branches_via_offset_pagination).and_call_original
branches, prev_page, next_page = subject
expected_page_token = ERB::Util.url_encode(TestEnv::BRANCH_SHA.sort[19][0])
expect(branches.size).to eq(20)
expect(next_page).to eq("/#{project.full_path}/-/branches/all?offset=1&page_token=#{expected_page_token}")
expect(prev_page).to be_nil
end
end
context 'uses gitaly pagination' do
before do
expect(finder).to receive(:fetch_branches_via_gitaly_pagination).and_call_original
end
it 'returns branches for the first page' do
branches, prev_page, next_page = subject
expected_page_token = ERB::Util.url_encode(TestEnv::BRANCH_SHA.sort[19][0])
expect(branches.size).to eq(20)
expect(next_page).to eq("/#{project.full_path}/-/branches/all?offset=1&page_token=#{expected_page_token}")
expect(prev_page).to be_nil
end
context 'when second page is requested' do
let(:page_token) { 'conflict-resolvable' }
let(:params) { { page_token: page_token, mode: 'all', sort: 'name_asc', offset: 1 } }
it 'returns branches for the first page' do
branches, prev_page, next_page = subject
branch_index = TestEnv::BRANCH_SHA.sort.find_index { |a| a[0] == page_token }
expected_page_token = ERB::Util.url_encode(TestEnv::BRANCH_SHA.sort[20 + branch_index][0])
expect(branches.size).to eq(20)
expect(next_page).to eq("/#{project.full_path}/-/branches/all?offset=2&page_token=#{expected_page_token}&sort=name_asc")
expect(prev_page).to eq("/#{project.full_path}/-/branches/all?offset=0&page=1&sort=name_asc")
end
end
context 'when last page is requested' do
let(:page_token) { TestEnv::BRANCH_SHA.sort[-16][0] }
let(:params) { { page_token: page_token, mode: 'all', sort: 'name_asc', offset: 4 } }
it 'returns branches after the specified branch' do
branches, prev_page, next_page = subject
expect(branches.size).to eq(15)
expect(next_page).to be_nil
expect(prev_page).to eq("/#{project.full_path}/-/branches/all?offset=3&page=4&sort=name_asc")
end
end
end
context 'filter by mode' do
let(:stale) { double(state: 'stale') }
let(:active) { double(state: 'active') }
before do
allow_next_instance_of(BranchesFinder) do |instance|
allow(instance).to receive(:execute).and_return([stale, active])
end
end
context 'stale' do
let(:params) { { mode: 'stale' } }
it 'returns stale branches' do
is_expected.to eq([[stale], nil, nil])
end
end
context 'active' do
let(:params) { { mode: 'active' } }
it 'returns active branches' do
is_expected.to eq([[active], nil, nil])
end
end
end
end
end
|
Write RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
# This service is an adapter used for the GitLab Import feature, and
# creating a project from a template.
# The latter will under the hood just import an archive supplied by GitLab.
module Projects
class GitlabProjectsImportService
include Gitlab::Utils::StrongMemoize
include Gitlab::TemplateHelper
attr_reader :current_user, :params
def initialize(user, import_params, override_params = nil)
@current_user = user
@params = import_params.dup
@override_params = override_params
end
def execute
prepare_template_environment(template_file)
prepare_import_params
::Projects::CreateService.new(current_user, params).execute
end
private
def overwrite_project?
overwrite? && project_with_same_full_path?
end
def project_with_same_full_path?
Project.find_by_full_path(project_path).present?
end
# rubocop: disable CodeReuse/ActiveRecord
def current_namespace
strong_memoize(:current_namespace) do
Namespace.find_by(id: params[:namespace_id]) || current_user.namespace
end
end
# rubocop: enable CodeReuse/ActiveRecord
def project_path
"#{current_namespace.full_path}/#{params[:path]}"
end
def overwrite?
strong_memoize(:overwrite) do
params.delete(:overwrite)
end
end
def template_file
strong_memoize(:template_file) do
params.delete(:file)
end
end
def prepare_import_params
data = {}
data[:override_params] = @override_params if @override_params
if overwrite_project?
data[:original_path] = params[:path]
params[:path] += "-#{tmp_filename}"
end
if template_file
data[:sample_data] = params.delete(:sample_data) if params.key?(:sample_data)
params[:import_type] = 'gitlab_project'
end
params[:import_data] = { data: data } if data.present?
end
end
end
Projects::GitlabProjectsImportService.prepend_mod_with('Projects::GitlabProjectsImportService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::GitlabProjectsImportService, feature_category: :importers do
let_it_be(:namespace) { create(:namespace) }
let(:path) { 'test-path' }
let(:file) { fixture_file_upload('spec/fixtures/project_export.tar.gz') }
let(:overwrite) { false }
let(:import_params) { { namespace_id: namespace.id, path: path, file: file, overwrite: overwrite } }
subject { described_class.new(namespace.owner, import_params) }
describe '#execute' do
it_behaves_like 'gitlab projects import validations'
end
end
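As a usage note, a hedged sketch of invoking the service directly; the parameter names mirror the class above, and the fixture path is the one already used in this spec:
```ruby
params = {
  namespace_id: user.namespace.id,
  path: 'imported-project',
  file: fixture_file_upload('spec/fixtures/project_export.tar.gz'),
  overwrite: false
}
# prepare_import_params strips :file and :overwrite before delegating to
# Projects::CreateService, and sets import_type to 'gitlab_project'.
project = Projects::GitlabProjectsImportService.new(user, params).execute
```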
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class for counting and caching the number of all merge requests of
# a project.
class AllMergeRequestsCountService < Projects::CountService
def relation_for_count
@project.merge_requests
end
def cache_key_name
'all_merge_requests_count'
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::AllMergeRequestsCountService, :use_clean_rails_memory_store_caching, feature_category: :groups_and_projects do
let_it_be(:project) { create(:project) }
subject { described_class.new(project) }
it_behaves_like 'a counter caching service'
describe '#count' do
it 'returns the number of all merge requests' do
create(:merge_request, :opened, source_project: project, target_project: project)
create(:merge_request, :closed, source_project: project, target_project: project)
create(:merge_request, :merged, source_project: project, target_project: project)
expect(subject.count).to eq(3)
end
end
end
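For context, the shared 'a counter caching service' examples exercise the caching contract inherited from `Projects::CountService`; a minimal usage sketch, assuming the base class exposes `count` and `refresh_cache` as those shared examples imply:
```ruby
service = Projects::AllMergeRequestsCountService.new(project)
service.count         # reads the cached value, computing it on a miss
service.refresh_cache # recomputes after bulk changes to merge requests
```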
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class for counting and caching the number of all issues of a
# project.
class AllIssuesCountService < Projects::CountService
def relation_for_count
@project.issues
end
def cache_key_name
'all_issues_count'
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::AllIssuesCountService, :use_clean_rails_memory_store_caching, feature_category: :groups_and_projects do
let_it_be(:group) { create(:group, :public) }
let_it_be(:project) { create(:project, :public, namespace: group) }
let_it_be(:banned_user) { create(:user, :banned) }
subject { described_class.new(project) }
it_behaves_like 'a counter caching service'
describe '#count' do
it 'returns the number of all issues' do
create(:issue, :opened, project: project)
create(:issue, :opened, confidential: true, project: project)
create(:issue, :opened, author: banned_user, project: project)
create(:issue, :closed, project: project)
expect(subject.count).to eq(4)
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class ParticipantsService < BaseService
include Users::ParticipableService
def execute(noteable)
@noteable = noteable
participants =
noteable_owner +
participants_in_noteable +
all_members +
project_members +
groups
render_participants_as_hash(participants.uniq)
end
def project_members
@project_members ||= sorted(project.authorized_users)
end
def all_members
return [] if Feature.enabled?(:disable_all_mention)
[{ username: "all", name: "All Project and Group Members", count: project_members.count }]
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ParticipantsService, feature_category: :groups_and_projects do
describe '#execute' do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :public) }
let_it_be(:noteable) { create(:issue, project: project) }
before_all do
project.add_developer(user)
stub_feature_flags(disable_all_mention: false)
end
def run_service
described_class.new(project, user).execute(noteable)
end
it 'returns results in correct order' do
group = create(:group).tap { |g| g.add_owner(user) }
expect(run_service.pluck(:username)).to eq([
noteable.author.username, 'all', user.username, group.full_path
])
end
it 'includes `All Project and Group Members`' do
expect(run_service).to include(a_hash_including({ username: "all", name: "All Project and Group Members" }))
end
context 'N+1 checks' do
before do
run_service # warmup, runs table cache queries and create queries
BatchLoader::Executor.clear_current
end
it 'avoids N+1 UserDetail queries' do
project.add_developer(create(:user))
control_count = ActiveRecord::QueryRecorder.new { run_service.to_a }.count
BatchLoader::Executor.clear_current
project.add_developer(create(:user, status: build(:user_status, availability: :busy)))
expect { run_service.to_a }.not_to exceed_query_limit(control_count)
end
it 'avoids N+1 groups queries' do
group_1 = create(:group)
group_1.add_owner(user)
control_count = ActiveRecord::QueryRecorder.new { run_service }.count
BatchLoader::Executor.clear_current
group_2 = create(:group)
group_2.add_owner(user)
expect { run_service }.not_to exceed_query_limit(control_count)
end
end
it 'does not return duplicate author' do
participants = run_service
expect(participants.count { |p| p[:username] == noteable.author.username }).to eq 1
end
describe 'group items' do
subject(:group_items) { run_service.select { |hash| hash[:type].eql?('Group') } }
describe 'group user counts' do
let(:group_1) { create(:group) }
let(:group_2) { create(:group) }
before do
group_1.add_owner(user)
group_1.add_owner(create(:user))
group_2.add_owner(user)
create(:group_member, :access_request, group: group_2, user: create(:user))
end
it 'returns correct user counts for groups' do
expect(group_items).to contain_exactly(
a_hash_including(name: group_1.full_name, count: 2),
a_hash_including(name: group_2.full_name, count: 1)
)
end
end
describe 'avatar_url' do
let(:group) { create(:group, avatar: fixture_file_upload('spec/fixtures/dk.png')) }
before do
group.add_owner(user)
end
it 'returns an url for the avatar' do
expect(group_items.size).to eq 1
expect(group_items.first[:avatar_url]).to eq("/uploads/-/system/group/avatar/#{group.id}/dk.png")
end
it 'returns an url for the avatar with relative url' do
stub_config_setting(relative_url_root: '/gitlab')
stub_config_setting(url: Settings.send(:build_gitlab_url))
expect(group_items.size).to eq 1
expect(group_items.first[:avatar_url]).to eq("/gitlab/uploads/-/system/group/avatar/#{group.id}/dk.png")
end
end
context 'with subgroups' do
let(:group_1) { create(:group, path: 'bb') }
let(:group_2) { create(:group, path: 'zz') }
let(:subgroup) { create(:group, path: 'aa', parent: group_1) }
before do
group_1.add_owner(user)
group_2.add_owner(user)
subgroup.add_owner(user)
end
it 'returns results ordered by full path' do
expect(group_items.pluck(:username)).to eq([
group_1.full_path, subgroup.full_path, group_2.full_path
])
end
end
end
context 'when `disable_all_mention` FF is enabled' do
before do
stub_feature_flags(disable_all_mention: true)
end
it 'does not include `All Project and Group Members`' do
expect(run_service).not_to include(a_hash_including({ username: "all", name: "All Project and Group Members" }))
end
end
end
describe '#project_members' do
subject(:usernames) { service.project_members.map { |member| member[:username] } }
context 'when there is a project in group namespace' do
let_it_be(:public_group) { create(:group, :public) }
let_it_be(:public_project, reload: true) { create(:project, :public, namespace: public_group) }
let_it_be(:public_group_owner) { create(:user) }
let(:service) { described_class.new(public_project, create(:user)) }
before do
public_group.add_owner(public_group_owner)
end
it 'returns members of a group' do
expect(usernames).to include(public_group_owner.username)
end
end
context 'when there is a private group and a public project' do
let_it_be(:public_group) { create(:group, :public) }
let_it_be(:private_group) { create(:group, :private, :nested) }
let_it_be(:public_project, reload: true) { create(:project, :public, namespace: public_group) }
let_it_be(:project_issue) { create(:issue, project: public_project) }
let_it_be(:public_group_owner) { create(:user) }
let_it_be(:private_group_member) { create(:user) }
let_it_be(:public_project_maintainer) { create(:user) }
let_it_be(:private_group_owner) { create(:user) }
let_it_be(:group_ancestor_owner) { create(:user) }
before_all do
public_group.add_owner public_group_owner
private_group.add_developer private_group_member
public_project.add_maintainer public_project_maintainer
private_group.add_owner private_group_owner
private_group.parent.add_owner group_ancestor_owner
end
context 'when the private group is invited to the public project' do
before_all do
create(:project_group_link, group: private_group, project: public_project)
end
let(:service) { described_class.new(public_project, create(:user)) }
it 'does not return the private group' do
expect(usernames).not_to include(private_group.name)
end
it 'returns private group members' do
expect(usernames).to include(private_group_member.username)
end
it 'returns the project maintainer' do
expect(usernames).to include(public_project_maintainer.username)
end
it 'returns project members from an invited public group' do
invited_public_group = create(:group, :public)
invited_public_group.add_owner create(:user)
create(:project_group_link, group: invited_public_group, project: public_project)
expect(usernames).to include(invited_public_group.users.first.username)
end
it 'returns members of the ancestral groups of the private group' do
expect(usernames).to include(group_ancestor_owner.username)
end
it 'returns invited group members of the private group' do
invited_group = create(:group, :public)
create(:group_group_link, shared_group: private_group, shared_with_group: invited_group)
other_user = create(:user)
invited_group.add_guest(other_user)
expect(usernames).to include(other_user.username)
end
end
end
end
end
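For clarity, illustrative participant hashes with the shapes these assertions rely on; the keys come from `render_participants_as_hash` in `Users::ParticipableService`, and the concrete values below are made up:
```ruby
user_entry  = { type: 'User', username: 'alice', name: 'Alice', avatar_url: '/uploads/avatar.png' }
group_entry = { type: 'Group', username: 'parent/child', name: 'Parent / Child', count: 2, avatar_url: nil }
all_entry   = { username: 'all', name: 'All Project and Group Members', count: 42 }
```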
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class MoveNotificationSettingsService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
Project.transaction do
move_notification_settings
remove_remaining_notification_settings if remove_remaining_elements
success
end
end
private
def move_notification_settings
non_existent_notifications.update_all(source_id: @project.id)
end
# Remove remaining notification settings from source_project
def remove_remaining_notification_settings
source_project.notification_settings.destroy_all # rubocop: disable Cop/DestroyAll
end
# Get users of current notification_settings
def users_in_target_project
@project.notification_settings.select(:user_id)
end
# Look for notification_settings in source_project that are not in the target project
# rubocop: disable CodeReuse/ActiveRecord
def non_existent_notifications
source_project.notification_settings
.select(:id)
.where.not(user_id: users_in_target_project)
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveNotificationSettingsService, feature_category: :groups_and_projects do
let(:user) { create(:user) }
let(:project_with_notifications) { create(:project, namespace: user.namespace) }
let(:target_project) { create(:project, namespace: user.namespace) }
subject { described_class.new(target_project, user) }
describe '#execute' do
context 'with notification settings' do
before do
create_list(:notification_setting, 2, source: project_with_notifications)
end
it 'moves the user\'s notification settings from one project to another' do
expect(project_with_notifications.notification_settings.count).to eq 3
expect(target_project.notification_settings.count).to eq 1
subject.execute(project_with_notifications)
expect(project_with_notifications.notification_settings.count).to eq 0
expect(target_project.notification_settings.count).to eq 3
end
it 'rollbacks changes if transaction fails' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_notifications) }.to raise_error(StandardError)
expect(project_with_notifications.notification_settings.count).to eq 3
expect(target_project.notification_settings.count).to eq 1
end
end
it 'does not move notification settings that already exist in the target project' do
expect(project_with_notifications.notification_settings.count).to eq 1
expect(target_project.notification_settings.count).to eq 1
expect(user.notification_settings.count).to eq 2
subject.execute(project_with_notifications)
expect(user.notification_settings.count).to eq 1
end
context 'when remove_remaining_elements is false' do
let(:options) { { remove_remaining_elements: false } }
it 'does not remove remaining notification settings' do
subject.execute(project_with_notifications, **options)
expect(project_with_notifications.notification_settings.count).not_to eq 0
end
end
end
end
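The dedup behavior asserted above comes from `non_existent_notifications` in the class; a standalone sketch of the same relational logic, with `source_project` and `target_project` as hypothetical stand-ins:
```ruby
# Move only settings whose user has no setting in the target project;
# the remainder are destroyed when remove_remaining_elements is true.
movable = source_project.notification_settings
  .where.not(user_id: target_project.notification_settings.select(:user_id))
movable.update_all(source_id: target_project.id)
```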
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class OverwriteProjectService < BaseService
def execute(source_project)
return unless source_project && source_project.namespace_id == @project.namespace_id
start_time = ::Gitlab::Metrics::System.monotonic_time
original_source_name = source_project.name
original_source_path = source_project.path
tmp_source_name, tmp_source_path = tmp_source_project_name(source_project)
move_relationships_between(source_project, @project)
source_project_rename = rename_project(source_project, tmp_source_name, tmp_source_path)
if source_project_rename[:status] == :error
raise 'Source project rename failed during project overwrite'
end
new_project_rename = rename_project(@project, original_source_name, original_source_path)
if new_project_rename[:status] == :error
rename_project(source_project, original_source_name, original_source_path)
raise 'New project rename failed during project overwrite'
end
schedule_source_project_deletion(source_project)
@project
rescue StandardError => e
move_relationships_between(@project, source_project)
remove_source_project_from_fork_network(source_project)
raise e
ensure
track_service(start_time, source_project, e)
end
private
def track_service(start_time, source_project, exception)
return if ::Feature.disabled?(:project_overwrite_service_tracking, source_project)
duration = ::Gitlab::Metrics::System.monotonic_time - start_time
Gitlab::AppJsonLogger.info(
class: self.class.name,
namespace_id: source_project.namespace_id,
project_id: source_project.id,
duration_s: duration.to_f,
error: exception.class.name
)
end
def move_relationships_between(source_project, target_project)
options = { remove_remaining_elements: false }
Project.transaction do
::Projects::MoveUsersStarProjectsService.new(target_project, @current_user).execute(source_project, **options)
::Projects::MoveAccessService.new(target_project, @current_user).execute(source_project, **options)
::Projects::MoveDeployKeysProjectsService.new(target_project, @current_user).execute(source_project, **options)
::Projects::MoveNotificationSettingsService.new(target_project, @current_user).execute(source_project, **options)
::Projects::MoveForksService.new(target_project, @current_user).execute(source_project, **options)
::Projects::MoveLfsObjectsProjectsService.new(target_project, @current_user).execute(source_project, **options)
add_source_project_to_fork_network(source_project)
end
end
def schedule_source_project_deletion(source_project)
::Projects::DestroyService.new(source_project, @current_user).async_execute
end
def rename_project(target_project, name, path)
::Projects::UpdateService.new(target_project, @current_user, { name: name, path: path }).execute
end
def add_source_project_to_fork_network(source_project)
return if source_project == @project
return unless fork_network
 # Because all references in the fork network have been moved away from the
 # source_project, we can no longer query the database for its former
 # relationships (only its cached data). That's why we add it back to the
 # network as a fork of the target project.
ForkNetworkMember.create!(
fork_network: fork_network,
project: source_project,
forked_from_project: @project
)
end
def remove_source_project_from_fork_network(source_project)
return unless fork_network
fork_member = ForkNetworkMember.find_by( # rubocop: disable CodeReuse/ActiveRecord
fork_network: fork_network,
project: source_project,
forked_from_project: @project)
fork_member&.destroy
end
def tmp_source_project_name(source_project)
random_string = SecureRandom.hex
tmp_name = "#{source_project.name}-old-#{random_string}"
tmp_path = "#{source_project.path}-old-#{random_string}"
[tmp_name, tmp_path]
end
def fork_network
@project.fork_network_member&.fork_network
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::OverwriteProjectService, feature_category: :groups_and_projects do
include ProjectForksHelper
let(:user) { create(:user) }
let(:project_from) { create(:project, namespace: user.namespace) }
let(:project_to) { create(:project, namespace: user.namespace) }
let!(:lvl1_forked_project_1) { fork_project(project_from, user) }
let!(:lvl1_forked_project_2) { fork_project(project_from, user) }
let!(:lvl2_forked_project_1_1) { fork_project(lvl1_forked_project_1, user) }
let!(:lvl2_forked_project_1_2) { fork_project(lvl1_forked_project_1, user) }
subject { described_class.new(project_to, user) }
before do
project_to.project_feature.reload
allow(project_to).to receive(:import_data).and_return(double(data: { 'original_path' => project_from.path }))
end
describe '#execute' do
shared_examples 'overwrite actions' do
it 'moves deploy keys' do
deploy_keys_count = project_from.deploy_keys_projects.count
subject.execute(project_from)
expect(project_to.deploy_keys_projects.count).to eq deploy_keys_count
end
it 'moves notification settings' do
notification_count = project_from.notification_settings.count
subject.execute(project_from)
expect(project_to.notification_settings.count).to eq notification_count
end
it 'moves users stars' do
stars_count = project_from.users_star_projects.count
subject.execute(project_from)
project_to.reload
expect(project_to.users_star_projects.count).to eq stars_count
expect(project_to.star_count).to eq stars_count
end
it 'moves project group links' do
group_links_count = project_from.project_group_links.count
subject.execute(project_from)
expect(project_to.project_group_links.count).to eq group_links_count
end
it 'moves memberships and authorizations' do
members_count = project_from.project_members.count
project_authorizations = project_from.project_authorizations.count
subject.execute(project_from)
expect(project_to.project_members.count).to eq members_count
expect(project_to.project_authorizations.count).to eq project_authorizations
end
context 'with lfs objects relationships' do
before do
create_list(:lfs_objects_project, 3, project: project_from)
end
it 'moves lfs objects to the new project' do
lfs_objects_count = project_from.lfs_objects.count
subject.execute(project_from)
expect(project_to.lfs_objects.count).to eq lfs_objects_count
end
end
it 'schedules original project for deletion' do
expect_next_instance_of(Projects::DestroyService) do |service|
expect(service).to receive(:async_execute)
end
subject.execute(project_from)
end
it 'renames the project' do
original_path = project_from.full_path
subject.execute(project_from)
expect(project_to.full_path).to eq(original_path)
end
it 'renames source project to temp name' do
allow(SecureRandom).to receive(:hex).and_return('test')
subject.execute(project_from)
expect(project_from.full_path).to include('-old-test')
end
context 'when project rename fails' do
before do
expect(subject).to receive(:move_relationships_between).with(project_from, project_to)
expect(subject).to receive(:move_relationships_between).with(project_to, project_from)
end
context 'source rename' do
it 'moves relations back to source project and raises an exception' do
allow(subject).to receive(:rename_project).and_return(status: :error)
expect { subject.execute(project_from) }.to raise_error(StandardError, 'Source project rename failed during project overwrite')
end
end
context 'new project rename' do
it 'moves relations back, renames source project back to original name and raises' do
name = project_from.name
path = project_from.path
allow(subject).to receive(:rename_project).and_call_original
allow(subject).to receive(:rename_project).with(project_to, name, path).and_return(status: :error)
expect { subject.execute(project_from) }.to raise_error(StandardError, 'New project rename failed during project overwrite')
expect(project_from.name).to eq(name)
expect(project_from.path).to eq(path)
end
end
end
end
context 'when project does not have any relation' do
it_behaves_like 'overwrite actions'
end
context 'when project with elements' do
it_behaves_like 'overwrite actions' do
let(:maintainer_user) { create(:user) }
let(:reporter_user) { create(:user) }
let(:developer_user) { create(:user) }
let(:maintainer_group) { create(:group) }
let(:reporter_group) { create(:group) }
let(:developer_group) { create(:group) }
before do
create_list(:deploy_keys_project, 2, project: project_from)
create_list(:notification_setting, 2, source: project_from)
create_list(:users_star_project, 2, project: project_from)
project_from.project_group_links.create!(group: maintainer_group, group_access: Gitlab::Access::MAINTAINER)
project_from.project_group_links.create!(group: developer_group, group_access: Gitlab::Access::DEVELOPER)
project_from.project_group_links.create!(group: reporter_group, group_access: Gitlab::Access::REPORTER)
project_from.add_maintainer(maintainer_user)
project_from.add_developer(developer_user)
project_from.add_reporter(reporter_user)
end
end
end
context 'forks', :sidekiq_inline do
context 'when moving a root forked project' do
it 'moves the descendant forks' do
expect(project_from.forks.count).to eq 2
expect(project_to.forks.count).to eq 0
subject.execute(project_from)
expect(project_from.forks.count).to eq 0
expect(project_to.forks.count).to eq 2
expect(lvl1_forked_project_1.forked_from_project).to eq project_to
expect(lvl1_forked_project_1.fork_network_member.forked_from_project).to eq project_to
expect(lvl1_forked_project_2.forked_from_project).to eq project_to
expect(lvl1_forked_project_2.fork_network_member.forked_from_project).to eq project_to
end
it 'updates the fork network' do
expect(project_from.fork_network.root_project).to eq project_from
expect(project_from.fork_network.fork_network_members.map(&:project)).to include project_from
subject.execute(project_from)
expect(project_to.reload.fork_network.root_project).to eq project_to
expect(project_to.fork_network.fork_network_members.map(&:project)).not_to include project_from
end
end
context 'when moving a intermediate forked project' do
let(:project_to) { create(:project, namespace: lvl1_forked_project_1.namespace) }
it 'moves the descendant forks' do
expect(lvl1_forked_project_1.forks.count).to eq 2
expect(project_to.forks.count).to eq 0
subject.execute(lvl1_forked_project_1)
expect(lvl1_forked_project_1.forks.count).to eq 0
expect(project_to.forks.count).to eq 2
expect(lvl2_forked_project_1_1.forked_from_project).to eq project_to
expect(lvl2_forked_project_1_1.fork_network_member.forked_from_project).to eq project_to
expect(lvl2_forked_project_1_2.forked_from_project).to eq project_to
expect(lvl2_forked_project_1_2.fork_network_member.forked_from_project).to eq project_to
end
it 'moves the ascendant fork' do
subject.execute(lvl1_forked_project_1)
expect(project_to.reload.forked_from_project).to eq project_from
expect(project_to.fork_network_member.forked_from_project).to eq project_from
end
it 'does not update fork network' do
subject.execute(lvl1_forked_project_1)
expect(project_to.reload.fork_network.root_project).to eq project_from
end
end
end
context 'if an exception is raised' do
before do
allow(subject).to receive(:rename_project).and_raise(StandardError)
end
it 'rollbacks changes' do
updated_at = project_from.updated_at
expect { subject.execute(project_from) }.to raise_error(StandardError)
expect(Project.find(project_from.id)).not_to be_nil
expect(project_from.reload.updated_at.change(usec: 0)).to eq updated_at.change(usec: 0)
end
it 'removes fork network member' do
expect(ForkNetworkMember).to receive(:create!)
expect(ForkNetworkMember).to receive(:find_by)
expect(subject).to receive(:remove_source_project_from_fork_network).and_call_original
expect { subject.execute(project_from) }.to raise_error(StandardError)
expect(project_from.fork_network_member).to be_nil
end
end
end
end
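As a recap of the flow these specs exercise, a hedged usage sketch of the service; the step comments summarize the class above rather than adding new behavior:
```ruby
service = Projects::OverwriteProjectService.new(project_to, user)
service.execute(project_from)
# 1. stars, members, deploy keys, notifications, forks and LFS move to project_to
# 2. project_from is renamed to "<name>-old-<random hex>"
# 3. project_to takes project_from's original name and path
# 4. project_from is scheduled for async deletion
```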
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class CreateFromTemplateService < BaseService
include Gitlab::Utils::StrongMemoize
attr_reader :template_name
def initialize(user, params)
@current_user = user
@params = params.to_h.dup
@template_name = @params.delete(:template_name).presence
end
def execute
return project unless validate_template!
file = built_in_template&.file || sample_data_template&.file
override_params = params.dup
if built_in_template
params[:file] = built_in_template.file
elsif sample_data_template
params[:file] = sample_data_template.file
params[:sample_data] = true
end
GitlabProjectsImportService.new(current_user, params, override_params).execute
ensure
file&.close
end
private
def validate_template!
return true if built_in_template || sample_data_template
project.errors.add(:template_name, _("'%{template_name}' is unknown or invalid" % { template_name: template_name }))
false
end
def built_in_template
strong_memoize(:built_in_template) do
Gitlab::ProjectTemplate.find(template_name)
end
end
def sample_data_template
strong_memoize(:sample_data_template) do
Gitlab::SampleDataTemplate.find(template_name)
end
end
def project
@project ||= ::Project.new(namespace_id: params[:namespace_id])
end
end
end
Projects::CreateFromTemplateService.prepend_mod_with('Projects::CreateFromTemplateService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::CreateFromTemplateService, feature_category: :groups_and_projects do
let(:user) { create(:user) }
let(:template_name) { 'rails' }
let(:project_params) do
{
path: user.to_param,
template_name: template_name,
description: 'project description',
visibility_level: Gitlab::VisibilityLevel::PUBLIC
}
end
subject { described_class.new(user, project_params) }
it 'calls the importer service' do
import_service_double = double
allow(Projects::GitlabProjectsImportService).to receive(:new).and_return(import_service_double)
expect(import_service_double).to receive(:execute)
subject.execute
end
it 'returns the project that is created' do
project = subject.execute
expect(project).to be_saved
expect(project.import_scheduled?).to be(true)
end
context 'when template is not present' do
let(:template_name) { 'non_existent' }
let(:project) { subject.execute }
before do
expect(project).not_to be_saved
end
it 'does not set import type' do
expect(project.import_type).to be nil
end
it 'does not set import source' do
expect(project.import_source).to be nil
end
it 'is not scheduled' do
expect(project.import_scheduled?).to be(false)
end
it 'repository is empty' do
expect(project.repository.empty?).to be(true)
end
end
context 'the result project' do
before do
perform_enqueued_jobs do
@project = subject.execute
end
@project.reload
end
it 'overrides template description' do
expect(@project.description).to match('project description')
end
it 'overrides template visibility_level' do
expect(@project.visibility_level).to eq(Gitlab::VisibilityLevel::PUBLIC)
end
end
end
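For reference, a minimal usage sketch; `'rails'` is one of the built-in `Gitlab::ProjectTemplate` names this spec relies on:
```ruby
params = {
  path: 'my-rails-app',
  template_name: 'rails',
  description: 'project description',
  visibility_level: Gitlab::VisibilityLevel::PUBLIC
}
project = Projects::CreateFromTemplateService.new(user, params).execute
project.saved? && project.import_scheduled? # => true on success
```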
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class DownloadService < BaseService
ALLOWLIST = [
/^[^.]+\.fogbugz.com$/
].freeze
def initialize(project, url)
@project = project
@url = url
end
def execute
return unless valid_url?(@url)
uploader = FileUploader.new(@project)
uploader.download!(@url)
uploader.store!
uploader.to_h
end
private
def valid_url?(url)
url && http?(url) && valid_domain?(url)
end
def http?(url)
url =~ /\A#{URI::DEFAULT_PARSER.make_regexp(%w[http https])}\z/
end
def valid_domain?(url)
host = URI.parse(url).host
ALLOWLIST.any? { |entry| entry === host }
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::DownloadService, feature_category: :groups_and_projects do
describe 'File service' do
before do
@user = create(:user)
@project = create(:project, creator_id: @user.id, namespace: @user.namespace)
end
context 'for a URL that is not on allowlist' do
before do
url = 'https://code.jquery.com/jquery-2.1.4.min.js'
@link_to_file = download_file(@project, url)
end
it { expect(@link_to_file).to eq(nil) }
end
context 'for URLs that are on the allowlist' do
before do
# `ssrf_filter` resolves the hostname. See https://github.com/carrierwaveuploader/carrierwave/commit/91714adda998bc9e8decf5b1f5d260d808761304
stub_request(:get, %r{http://[\d.]+/rails_sample.jpg}).to_return(body: File.read(Rails.root + 'spec/fixtures/rails_sample.jpg'))
stub_request(:get, %r{http://[\d.]+/doc_sample.txt}).to_return(body: File.read(Rails.root + 'spec/fixtures/doc_sample.txt'))
end
context 'an image file' do
before do
url = 'http://mycompany.fogbugz.com/rails_sample.jpg'
@link_to_file = download_file(@project, url)
end
it { expect(@link_to_file).to have_key(:alt) }
it { expect(@link_to_file).to have_key(:url) }
it { expect(@link_to_file[:url]).to match('rails_sample.jpg') }
it { expect(@link_to_file[:alt]).to eq('rails_sample') }
end
context 'a txt file' do
before do
url = 'http://mycompany.fogbugz.com/doc_sample.txt'
@link_to_file = download_file(@project, url)
end
it { expect(@link_to_file).to have_key(:alt) }
it { expect(@link_to_file).to have_key(:url) }
it { expect(@link_to_file[:url]).to match('doc_sample.txt') }
it { expect(@link_to_file[:alt]).to eq('doc_sample.txt') }
end
end
end
def download_file(repository, url)
Projects::DownloadService.new(repository, url).execute
end
end
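The allowlist check is a plain `Regexp#===` against the URL host; a self-contained sketch of the same validation logic:
```ruby
require 'uri'

ALLOWLIST = [/^[^.]+\.fogbugz.com$/].freeze

def allowed?(url)
  host = URI.parse(url).host
  ALLOWLIST.any? { |entry| entry === host }
end

allowed?('http://mycompany.fogbugz.com/doc_sample.txt')  # => true
allowed?('https://code.jquery.com/jquery-2.1.4.min.js')  # => false
```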
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class RepositoryLanguagesService < BaseService
def execute
perform_language_detection unless project.detected_repository_languages?
persisted_repository_languages
end
private
def perform_language_detection
if persisted_repository_languages.blank?
::DetectRepositoryLanguagesWorker.perform_async(project.id)
else
project.update_column(:detected_repository_languages, true)
end
end
def persisted_repository_languages
project.repository_languages
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::RepositoryLanguagesService, feature_category: :source_code_management do
let(:service) { described_class.new(project, project.first_owner) }
context 'when detected_repository_languages flag is not set' do
let(:project) { create(:project) }
context 'when a project is without detected programming languages' do
it 'schedules a worker and returns an empty result' do
expect(::DetectRepositoryLanguagesWorker).to receive(:perform_async).with(project.id)
expect(service.execute).to eq([])
end
end
context 'when a project is with detected programming languages' do
let!(:repository_language) { create(:repository_language, project: project) }
it 'does not schedule a worker and returns the detected languages' do
expect(::DetectRepositoryLanguagesWorker).not_to receive(:perform_async).with(project.id)
languages = service.execute
expect(languages.size).to eq(1)
expect(languages.last.attributes.values).to eq(
[project.id, repository_language.programming_language_id, repository_language.share]
)
end
it 'sets detected_repository_languages flag' do
expect { service.execute }.to change(project, :detected_repository_languages).from(nil).to(true)
end
end
end
context 'when detected_repository_languages flag is set' do
let!(:repository_language) { create(:repository_language, project: project) }
let(:project) { create(:project, detected_repository_languages: true) }
let(:languages) { service.execute }
it 'returns repository languages' do
expect(languages.size).to eq(1)
expect(languages.last.attributes.values).to eq(
[project.id, repository_language.programming_language_id, repository_language.share]
)
end
end
end
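A short usage note covering the three paths the specs above exercise; the behavior is read straight from the class:
```ruby
languages = Projects::RepositoryLanguagesService.new(project, project.first_owner).execute
# - flag unset, nothing persisted: enqueues DetectRepositoryLanguagesWorker, returns []
# - flag unset, rows persisted:    flips detected_repository_languages to true, returns rows
# - flag already set:              just returns project.repository_languages
```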
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
# Projects::TransferService class
#
# Used to transfer a project to another namespace
#
# Ex.
# # Move project to namespace by user
# Projects::TransferService.new(project, user).execute(namespace)
#
module Projects
class TransferService < BaseService
include Gitlab::ShellAdapter
TransferError = Class.new(StandardError)
def log_project_transfer_success(project, new_namespace)
log_transfer(project, new_namespace, nil)
end
def log_project_transfer_error(project, new_namespace, error_message)
log_transfer(project, new_namespace, error_message)
end
def execute(new_namespace)
@new_namespace = new_namespace
if @new_namespace.blank?
raise TransferError, s_('TransferProject|Please select a new namespace for your project.')
end
if @new_namespace.id == project.namespace_id
raise TransferError, s_('TransferProject|Project is already in this namespace.')
end
unless allowed_transfer_project?(current_user, project)
raise TransferError, s_("TransferProject|You don't have permission to transfer this project.")
end
unless allowed_to_transfer_to_namespace?(current_user, @new_namespace)
raise TransferError, s_("TransferProject|You don't have permission to transfer projects into that namespace.")
end
@owner_of_personal_project_before_transfer = project.namespace.owner if project.personal?
transfer(project)
log_project_transfer_success(project, @new_namespace)
true
rescue Projects::TransferService::TransferError => ex
project.reset
project.errors.add(:new_namespace, ex.message)
log_project_transfer_error(project, @new_namespace, ex.message)
false
end
private
attr_reader :old_path, :new_path, :new_namespace, :old_namespace
def log_transfer(project, new_namespace, error_message = nil)
action = error_message.nil? ? "was" : "was not"
log_payload = {
message: "Project #{action} transferred to a new namespace",
project_id: project.id,
project_path: project.full_path,
project_namespace: project.namespace.full_path,
namespace_id: project.namespace_id,
new_namespace_id: new_namespace&.id,
new_project_namespace: new_namespace&.full_path,
error_message: error_message
}
if error_message.nil?
::Gitlab::AppLogger.info(log_payload)
else
::Gitlab::AppLogger.error(log_payload)
end
end
# rubocop: disable CodeReuse/ActiveRecord
def transfer(project)
@old_path = project.full_path
@old_group = project.group
@new_path = File.join(@new_namespace.try(:full_path) || '', project.path)
@old_namespace = project.namespace
if Project.where(namespace_id: @new_namespace.try(:id)).where('path = ? or name = ?', project.path, project.name).exists?
raise TransferError, s_("TransferProject|Project with same name or path in target namespace already exists")
end
if project.has_container_registry_tags?
# We currently don't support renaming repository if it contains tags in container registry
raise TransferError, s_('TransferProject|Project cannot be transferred, because tags are present in its container registry')
end
if !new_namespace_has_same_root?(project) && project.has_namespaced_npm_packages?
raise TransferError, s_("TransferProject|Root namespace can't be updated if the project has NPM packages scoped to the current root level namespace.")
end
proceed_to_transfer
end
# rubocop: enable CodeReuse/ActiveRecord
def new_namespace_has_same_root?(project)
new_namespace.root_ancestor == project.namespace.root_ancestor
end
def proceed_to_transfer
Gitlab::Database::QueryAnalyzers::PreventCrossDatabaseModification.temporary_ignore_tables_in_transaction(
%w[routes redirect_routes], url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/424282'
) do
Project.transaction do
project.expire_caches_before_rename(@old_path)
# Apply changes to the project
update_namespace_and_visibility(@new_namespace)
project.reconcile_shared_runners_setting!
project.save!
# Notifications
project.send_move_instructions(@old_path)
transfer_missing_group_resources(@old_group)
# Move uploads
move_project_uploads(project)
update_integrations
remove_paid_features
project.old_path_with_namespace = @old_path
update_repository_configuration(@new_path)
remove_issue_contacts
execute_system_hooks
end
end
update_pending_builds
post_update_hooks(project, @old_group)
rescue Exception # rubocop:disable Lint/RescueException
rollback_side_effects
raise
ensure
refresh_permissions
end
# Overridden in EE
def post_update_hooks(project, _old_group)
ensure_personal_project_owner_membership(project)
invalidate_personal_projects_counts
publish_event
end
# Overridden in EE
def remove_paid_features
end
def invalidate_personal_projects_counts
# If the project was moved out of a personal namespace,
# the cache of the namespace owner, before the transfer, should be cleared.
if @owner_of_personal_project_before_transfer.present?
@owner_of_personal_project_before_transfer.invalidate_personal_projects_count
end
# If the project has now moved into a personal namespace,
# the cache of the target namespace owner should be cleared.
project.invalidate_personal_projects_count_of_owner
end
def transfer_missing_group_resources(group)
Labels::TransferService.new(current_user, group, project).execute
Milestones::TransferService.new(current_user, group, project).execute
end
def allowed_transfer_project?(current_user, project)
current_user.can?(:change_namespace, project)
end
def allowed_to_transfer_to_namespace?(current_user, namespace)
current_user.can?(:transfer_projects, namespace)
end
def update_namespace_and_visibility(to_namespace)
# Apply new namespace id and visibility level
project.namespace = to_namespace
project.visibility_level = to_namespace.visibility_level unless project.visibility_level_allowed_by_group?
end
def update_repository_configuration(full_path)
project.set_full_path(gl_full_path: full_path)
project.track_project_repository
end
def ensure_personal_project_owner_membership(project)
# In case of personal projects, we want to make sure that
# a membership record with `OWNER` access level exists for the owner of the namespace.
return unless project.personal?
namespace_owner = project.namespace.owner
existing_membership_record = project.member(namespace_owner)
return if existing_membership_record.present? && existing_membership_record.access_level == Gitlab::Access::OWNER
project.add_owner(namespace_owner)
end
def refresh_permissions
# This ensures we only schedule 1 job for every user that has access to
# the namespaces.
user_ids = @old_namespace.user_ids_for_project_authorizations |
@new_namespace.user_ids_for_project_authorizations
AuthorizedProjectUpdate::ProjectRecalculateWorker.perform_async(project.id)
# Until we compare the inconsistency rates of the new specialized worker and
# the old approach, we still run AuthorizedProjectsWorker
# but with some delay and lower urgency as a safety net.
UserProjectAccessChangedService.new(user_ids).execute(
priority: UserProjectAccessChangedService::LOW_PRIORITY
)
end
def rollback_side_effects
project.reset
update_namespace_and_visibility(@old_namespace)
update_repository_configuration(@old_path)
end
def execute_system_hooks
system_hook_service.execute_hooks_for(project, :transfer)
end
def move_project_uploads(project)
return if project.hashed_storage?(:attachments)
Gitlab::UploadsTransfer.new.move_project(
project.path,
@old_namespace.full_path,
@new_namespace.full_path
)
end
def old_wiki_repo_path
"#{old_path}#{::Gitlab::GlRepository::WIKI.path_suffix}"
end
def new_wiki_repo_path
"#{new_path}#{::Gitlab::GlRepository::WIKI.path_suffix}"
end
def old_design_repo_path
"#{old_path}#{::Gitlab::GlRepository::DESIGN.path_suffix}"
end
def new_design_repo_path
"#{new_path}#{::Gitlab::GlRepository::DESIGN.path_suffix}"
end
def update_integrations
project.integrations.with_default_settings.delete_all
Integration.create_from_active_default_integrations(project, :project_id)
end
def update_pending_builds
::Ci::PendingBuilds::UpdateProjectWorker.perform_async(project.id, pending_builds_params)
end
def pending_builds_params
{
namespace_id: new_namespace.id,
namespace_traversal_ids: new_namespace.traversal_ids
}
end
def remove_issue_contacts
return unless @old_group&.root_ancestor != @new_namespace&.root_ancestor
CustomerRelations::IssueContact.delete_for_project(project.id)
end
def publish_event
event = ::Projects::ProjectTransferedEvent.new(data: {
project_id: project.id,
old_namespace_id: old_namespace.id,
old_root_namespace_id: old_namespace.root_ancestor.id,
new_namespace_id: new_namespace.id,
new_root_namespace_id: new_namespace.root_ancestor.id
})
Gitlab::EventStore.publish(event)
end
end
end
Projects::TransferService.prepend_mod_with('Projects::TransferService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::TransferService, feature_category: :groups_and_projects do
let_it_be(:group) { create(:group) }
let_it_be(:user) { create(:user) }
let_it_be(:group_integration) { create(:integrations_slack, :group, group: group, webhook: 'http://group.slack.com') }
let(:project) { create(:project, :repository, :legacy_storage, namespace: user.namespace) }
let(:target) { group }
let(:executor) { user }
subject(:execute_transfer) { described_class.new(project, executor).execute(target).tap { project.reload } }
context 'with npm packages' do
before do
group.add_owner(user)
end
subject(:transfer_service) { described_class.new(project, user) }
let!(:package) { create(:npm_package, project: project, name: "@testscope/test") }
context 'with a root namespace change' do
it 'allow the transfer' do
expect(transfer_service.execute(group)).to be true
expect(project.errors[:new_namespace]).to be_empty
end
end
context 'with pending destruction package' do
before do
package.pending_destruction!
end
it 'allow the transfer' do
expect(transfer_service.execute(group)).to be true
expect(project.errors[:new_namespace]).to be_empty
end
end
context 'with namespaced packages present' do
let!(:package) { create(:npm_package, project: project, name: "@#{project.root_namespace.path}/test") }
it 'does not allow the transfer' do
expect(transfer_service.execute(group)).to be false
expect(project.errors[:new_namespace]).to include("Root namespace can't be updated if the project has NPM packages scoped to the current root level namespace.")
end
end
context 'without a root namespace change' do
let(:root) { create(:group) }
let(:group) { create(:group, parent: root) }
let(:other_group) { create(:group, parent: root) }
let(:project) { create(:project, :repository, namespace: group) }
before do
other_group.add_owner(user)
end
it 'allow the transfer' do
expect(transfer_service.execute(other_group)).to be true
expect(project.errors[:new_namespace]).to be_empty
end
end
end
context 'namespace -> namespace' do
before do
allow_next_instance_of(Gitlab::UploadsTransfer) do |service|
allow(service).to receive(:move_project).and_return(true)
end
group.add_owner(user)
end
it 'updates the namespace' do
transfer_result = execute_transfer
expect(transfer_result).to be_truthy
expect(project.namespace).to eq(group)
end
context 'EventStore' do
let(:group) do
create(:group, :nested).tap { |g| g.add_owner(user) }
end
let(:target) do
create(:group, :nested).tap { |g| g.add_owner(user) }
end
let(:project) { create(:project, namespace: group) }
it 'publishes a ProjectTransferedEvent' do
expect { execute_transfer }
.to publish_event(Projects::ProjectTransferedEvent)
.with(
project_id: project.id,
old_namespace_id: group.id,
old_root_namespace_id: group.root_ancestor.id,
new_namespace_id: target.id,
new_root_namespace_id: target.root_ancestor.id
)
end
end
context 'when project has an associated project namespace' do
it 'keeps project namespace in sync with project' do
transfer_result = execute_transfer
expect(transfer_result).to be_truthy
project_namespace_in_sync(group)
end
context 'when project is transferred to a deeper nested group' do
let(:parent_group) { create(:group) }
let(:sub_group) { create(:group, parent: parent_group) }
let(:sub_sub_group) { create(:group, parent: sub_group) }
let(:group) { sub_sub_group }
it 'keeps project namespace in sync with project' do
transfer_result = execute_transfer
expect(transfer_result).to be_truthy
project_namespace_in_sync(sub_sub_group)
end
end
end
end
context 'project in a group -> a personal namespace', :enable_admin_mode do
let(:project) { create(:project, :repository, :legacy_storage, group: group) }
let(:target) { user.namespace }
# We need to use an admin user as the executor because
# only an admin user has the required permissions to transfer projects
# under _all_ the different circumstances specified below.
let(:executor) { create(:user, :admin) }
it 'executes the transfer to personal namespace successfully' do
execute_transfer
expect(project.namespace).to eq(user.namespace)
end
it 'invalidates personal_project_count cache of the owner of the personal namespace' do
expect(user).to receive(:invalidate_personal_projects_count)
execute_transfer
end
context 'the owner of the namespace does not have a direct membership in the project residing in the group' do
it 'creates a project membership record for the owner of the namespace, with OWNER access level, after the transfer' do
execute_transfer
expect(project.members.owners.find_by(user_id: user.id)).to be_present
end
end
context 'the owner of the namespace has a direct membership in the project residing in the group' do
context 'that membership has an access level of OWNER' do
before do
project.add_owner(user)
end
it 'retains the project membership record for the owner of the namespace, with OWNER access level, after the transfer' do
execute_transfer
expect(project.members.owners.find_by(user_id: user.id)).to be_present
end
end
context 'that membership has an access level that is not OWNER' do
before do
project.add_developer(user)
end
it 'updates the project membership record for the owner of the namespace, to OWNER access level, after the transfer' do
execute_transfer
expect(project.members.owners.find_by(user_id: user.id)).to be_present
end
end
end
end
context 'personal namespace -> group', :enable_admin_mode do
let(:executor) { create(:admin) }
it 'invalidates personal_project_count cache of the owner of the personal namespace ' \
  'that previously held the project' do
expect(user).to receive(:invalidate_personal_projects_count)
execute_transfer
end
end
context 'when transfer succeeds' do
before do
group.add_owner(user)
end
it 'sends notifications' do
expect_any_instance_of(NotificationService).to receive(:project_was_moved)
execute_transfer
end
it 'invalidates the user\'s personal_project_count cache' do
expect(user).to receive(:invalidate_personal_projects_count)
execute_transfer
end
it 'executes system hooks' do
expect_next_instance_of(described_class) do |service|
expect(service).to receive(:execute_system_hooks)
end
execute_transfer
end
it 'moves the disk path', :aggregate_failures do
old_path = project.repository.disk_path
old_full_path = project.repository.full_path
execute_transfer
project.reload_repository!
expect(project.repository.disk_path).not_to eq(old_path)
expect(project.repository.full_path).not_to eq(old_full_path)
expect(project.disk_path).not_to eq(old_path)
expect(project.disk_path).to start_with(group.path)
end
it 'updates project full path in gitaly' do
execute_transfer
expect(project.repository.full_path).to eq "#{group.full_path}/#{project.path}"
end
it 'updates storage location' do
execute_transfer
expect(project.project_repository).to have_attributes(
disk_path: "#{group.full_path}/#{project.path}",
shard_name: project.repository_storage
)
end
context 'with a project integration' do
let_it_be_with_reload(:project) { create(:project, namespace: user.namespace) }
let_it_be(:instance_integration) { create(:integrations_slack, :instance) }
let_it_be(:project_integration) { create(:integrations_slack, project: project) }
context 'when it inherits from instance_integration' do
before do
project_integration.update!(inherit_from_id: instance_integration.id, webhook: instance_integration.webhook)
end
it 'replaces inherited integrations', :aggregate_failures do
expect { execute_transfer }
.to change(Integration, :count).by(0)
.and change { project.slack_integration.webhook }.to eq(group_integration.webhook)
end
end
context 'with a custom integration' do
it 'does not update the integrations' do
expect { execute_transfer }.not_to change { project.slack_integration.webhook }
end
end
end
context 'when project has pending builds', :sidekiq_inline do
let!(:other_project) { create(:project) }
let!(:pending_build) { create(:ci_pending_build, project: project.reload) }
let!(:unrelated_pending_build) { create(:ci_pending_build, project: other_project) }
before do
group.reload
end
it 'updates pending builds for the project', :aggregate_failures do
execute_transfer
pending_build.reload
unrelated_pending_build.reload
expect(pending_build.namespace_id).to eq(group.id)
expect(pending_build.namespace_traversal_ids).to eq(group.traversal_ids)
expect(unrelated_pending_build.namespace_id).to eq(other_project.namespace_id)
expect(unrelated_pending_build.namespace_traversal_ids).to eq(other_project.namespace.traversal_ids)
end
end
end
context 'when transfer fails' do
let!(:original_path) { project.repository.relative_path }
def attempt_project_transfer(&block)
expect do
execute_transfer
end.to raise_error(ActiveRecord::ActiveRecordError)
end
before do
group.add_owner(user)
expect_any_instance_of(Labels::TransferService).to receive(:execute).and_raise(ActiveRecord::StatementInvalid, "PG ERROR")
end
it 'rolls back repo location' do
attempt_project_transfer
expect(project.repository.raw.exists?).to be(true)
expect(original_path).to eq project.repository.relative_path
end
it 'rolls back project full path in gitaly' do
attempt_project_transfer
expect(project.repository.full_path).to eq project.full_path
end
it "doesn't send move notifications" do
expect_any_instance_of(NotificationService).not_to receive(:project_was_moved)
attempt_project_transfer
end
it "doesn't run system hooks" do
attempt_project_transfer do |service|
expect(service).not_to receive(:execute_system_hooks)
end
end
it 'does not update storage location' do
attempt_project_transfer
expect(project.project_repository).to have_attributes(
disk_path: project.disk_path,
shard_name: project.repository_storage
)
end
it 'does not publish a ProjectTransferedEvent' do
expect { attempt_project_transfer }
.not_to publish_event(Projects::ProjectTransferedEvent)
end
context 'when project has pending builds', :sidekiq_inline do
let!(:other_project) { create(:project) }
let!(:pending_build) { create(:ci_pending_build, project: project.reload) }
let!(:unrelated_pending_build) { create(:ci_pending_build, project: other_project) }
it 'does not update pending builds for the project', :aggregate_failures do
attempt_project_transfer
pending_build.reload
unrelated_pending_build.reload
expect(pending_build.namespace_id).to eq(project.namespace_id)
expect(pending_build.namespace_traversal_ids).to eq(project.namespace.traversal_ids)
expect(unrelated_pending_build.namespace_id).to eq(other_project.namespace_id)
expect(unrelated_pending_build.namespace_traversal_ids).to eq(other_project.namespace.traversal_ids)
end
end
context 'when project has an associated project namespace' do
it 'keeps project namespace in sync with project' do
attempt_project_transfer
project_namespace_in_sync(user.namespace)
end
end
end
context 'namespace -> no namespace' do
let(:group) { nil }
it 'does not allow the project transfer' do
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.namespace).to eq(user.namespace)
expect(project.errors.messages[:new_namespace].first).to eq 'Please select a new namespace for your project.'
end
context 'when project has an associated project namespace' do
it 'keeps project namespace in sync with project' do
transfer_result = execute_transfer
expect(transfer_result).to be false
project_namespace_in_sync(user.namespace)
end
end
end
context 'disallow transferring of project with tags' do
let(:container_repository) { create(:container_repository) }
before do
stub_container_registry_config(enabled: true)
stub_container_registry_tags(repository: :any, tags: ['tag'])
project.container_repositories << container_repository
end
it 'does not allow the project transfer' do
expect(execute_transfer).to eq false
end
end
context 'namespace -> not allowed namespace' do
it 'does not allow the project transfer' do
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.namespace).to eq(user.namespace)
end
end
context 'target namespace containing the same project name' do
before do
group.add_owner(user)
create(:project, name: project.name, group: group, path: 'other')
end
it 'does not allow the project transfer' do
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.namespace).to eq(user.namespace)
expect(project.errors[:new_namespace]).to include('Project with same name or path in target namespace already exists')
end
end
context 'target namespace containing the same project path' do
before do
group.add_owner(user)
create(:project, name: 'other-name', path: project.path, group: group)
end
it 'does not allow the project transfer' do
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.namespace).to eq(user.namespace)
expect(project.errors[:new_namespace]).to include('Project with same name or path in target namespace already exists')
end
end
context 'target namespace matches current namespace' do
let(:group) { user.namespace }
it 'does not allow project transfer' do
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.namespace).to eq(user.namespace)
expect(project.errors[:new_namespace]).to include('Project is already in this namespace.')
end
end
context 'target namespace belongs to bot user', :enable_admin_mode do
let(:bot) { create(:user, :project_bot) }
let(:target) { bot.namespace }
let(:executor) { create(:user, :admin) }
it 'does not allow project transfer' do
namespace = project.namespace
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.namespace).to eq(namespace)
expect(project.errors[:new_namespace]).to include("You don't have permission to transfer projects into that namespace.")
end
end
context 'when user does not own the project' do
let(:project) { create(:project, :repository, :legacy_storage) }
before do
project.add_developer(user)
end
it 'does not allow project transfer to the target namespace' do
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.errors[:new_namespace]).to include("You don't have permission to transfer this project.")
end
end
context 'when user can create projects in the target namespace' do
let(:group) { create(:group, project_creation_level: ::Gitlab::Access::DEVELOPER_MAINTAINER_PROJECT_ACCESS) }
context 'but has only developer permissions in the target namespace' do
before do
group.add_developer(user)
end
it 'does not allow project transfer to the target namespace' do
transfer_result = execute_transfer
expect(transfer_result).to eq false
expect(project.namespace).to eq(user.namespace)
expect(project.errors[:new_namespace]).to include("You don't have permission to transfer projects into that namespace.")
end
end
end
context 'visibility level' do
let(:group) { create(:group, :internal) }
before do
group.add_owner(user)
end
context 'when namespace visibility level < project visibility level' do
let(:project) { create(:project, :public, :repository, namespace: user.namespace) }
before do
execute_transfer
end
it { expect(project.visibility_level).to eq(group.visibility_level) }
end
context 'when namespace visibility level > project visibility level' do
let(:project) { create(:project, :private, :repository, namespace: user.namespace) }
before do
execute_transfer
end
it { expect(project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE) }
end
end
context 'shared Runners group level configurations' do
using RSpec::Parameterized::TableSyntax
where(:project_shared_runners_enabled, :shared_runners_setting, :expected_shared_runners_enabled) do
true | :shared_runners_disabled_and_unoverridable | false
false | :shared_runners_disabled_and_unoverridable | false
true | :shared_runners_disabled_and_overridable | true
false | :shared_runners_disabled_and_overridable | false
true | :shared_runners_enabled | true
false | :shared_runners_enabled | false
end
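# The table reads: (fork's shared_runners_enabled, target group's shared
# runners setting) => shared_runners_enabled after the transfer. A group
# that disables shared runners without allowing overrides always forces
# the flag off on transferred projects.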
with_them do
let(:project) { create(:project, :public, :repository, namespace: user.namespace, shared_runners_enabled: project_shared_runners_enabled) }
let(:group) { create(:group, shared_runners_setting) }
it 'updates shared runners based on the parent group' do
group.add_owner(user)
expect(execute_transfer).to eq(true)
expect(project.shared_runners_enabled).to eq(expected_shared_runners_enabled)
end
end
end
context 'missing group labels applied to issues or merge requests' do
it 'delegates transfer to Labels::TransferService' do
group.add_owner(user)
expect_next_instance_of(Labels::TransferService, user, project.group, project) do |labels_transfer_service|
expect(labels_transfer_service).to receive(:execute).once.and_call_original
end
execute_transfer
end
end
context 'missing group milestones applied to issues or merge requests' do
it 'delegates transfer to Milestones::TransferService' do
group.add_owner(user)
expect_next_instance_of(Milestones::TransferService, user, project.group, project) do |milestones_transfer_service|
expect(milestones_transfer_service).to receive(:execute).once.and_call_original
end
execute_transfer
end
end
context 'when hashed storage in use' do
let!(:project) { create(:project, :repository, namespace: user.namespace) }
let!(:old_disk_path) { project.repository.disk_path }
before do
group.add_owner(user)
end
it 'does not move the disk path', :aggregate_failures do
new_full_path = "#{group.full_path}/#{project.path}"
execute_transfer
project.reload_repository!
expect(project.repository).to have_attributes(
disk_path: old_disk_path,
full_path: new_full_path
)
expect(project.disk_path).to eq(old_disk_path)
end
it 'does not move the disk path when the transfer fails', :aggregate_failures do
old_full_path = project.full_path
expect_next_instance_of(described_class) do |service|
allow(service).to receive(:execute_system_hooks).and_raise('foo')
end
expect { execute_transfer }.to raise_error('foo')
project.reload_repository!
expect(project.repository).to have_attributes(
disk_path: old_disk_path,
full_path: old_full_path
)
expect(project.disk_path).to eq(old_disk_path)
end
end
describe 'refreshing project authorizations' do
let(:old_group) { create(:group) }
let!(:project) { create(:project, namespace: old_group) }
let(:member_of_old_group) { create(:user) }
let(:group) { create(:group) }
let(:member_of_new_group) { create(:user) }
before do
old_group.add_developer(member_of_old_group)
group.add_maintainer(member_of_new_group)
# Add the executing user as an owner in both groups, so that
# the transfer can be executed.
old_group.add_owner(user)
group.add_owner(user)
end
it 'calls AuthorizedProjectUpdate::ProjectRecalculateWorker to update project authorizations' do
expect(AuthorizedProjectUpdate::ProjectRecalculateWorker)
.to receive(:perform_async).with(project.id)
execute_transfer
end
it 'calls AuthorizedProjectUpdate::UserRefreshFromReplicaWorker with a delay to update project authorizations' do
stub_feature_flags(do_not_run_safety_net_auth_refresh_jobs: false)
user_ids = [user.id, member_of_old_group.id, member_of_new_group.id].map { |id| [id] }
expect(AuthorizedProjectUpdate::UserRefreshFromReplicaWorker).to(
receive(:bulk_perform_in).with(
1.hour,
user_ids,
batch_delay: 30.seconds, batch_size: 100
)
)
execute_transfer
end
it 'refreshes the permissions of the members of the old and new namespace', :sidekiq_inline do
expect { execute_transfer }
.to change { member_of_old_group.authorized_projects.include?(project) }.from(true).to(false)
.and change { member_of_new_group.authorized_projects.include?(project) }.from(false).to(true)
end
end
describe 'transferring a design repository' do
subject { described_class.new(project, user) }
before do
group.add_owner(user)
end
def design_repository
project.design_repository
end
def clear_design_repo_memoization
project&.design_management_repository&.clear_memoization(:repository)
project.clear_memoization(:design_repository)
end
it 'does not create a design repository' do
expect(subject.execute(group)).to be true
clear_design_repo_memoization
expect(design_repository.exists?).to be false
end
describe 'when the project has a design repository' do
let(:project_repo_path) { "#{project.path}#{::Gitlab::GlRepository::DESIGN.path_suffix}" }
let(:old_full_path) { "#{user.namespace.full_path}/#{project_repo_path}" }
let(:new_full_path) { "#{group.full_path}/#{project_repo_path}" }
context 'with legacy storage' do
let(:project) { create(:project, :repository, :legacy_storage, :design_repo, namespace: user.namespace) }
it 'moves the repository' do
expect(subject.execute(group)).to be true
clear_design_repo_memoization
expect(design_repository).to have_attributes(
disk_path: new_full_path,
full_path: new_full_path
)
end
it 'does not move the repository when an error occurs', :aggregate_failures do
allow(subject).to receive(:execute_system_hooks).and_raise('foo')
expect { subject.execute(group) }.to raise_error('foo')
clear_design_repo_memoization
expect(design_repository).to have_attributes(
disk_path: old_full_path,
full_path: old_full_path
)
end
end
context 'with hashed storage' do
let(:project) { create(:project, :repository, namespace: user.namespace) }
it 'does not move the repository' do
old_disk_path = design_repository.disk_path
expect(subject.execute(group)).to be true
clear_design_repo_memoization
expect(design_repository).to have_attributes(
disk_path: old_disk_path,
full_path: new_full_path
)
end
it 'does not move the repository when an error occurs' do
old_disk_path = design_repository.disk_path
allow(subject).to receive(:execute_system_hooks).and_raise('foo')
expect { subject.execute(group) }.to raise_error('foo')
clear_design_repo_memoization
expect(design_repository).to have_attributes(
disk_path: old_disk_path,
full_path: old_full_path
)
end
end
end
end
context 'handling issue contacts' do
let_it_be(:root_group) { create(:group) }
let(:project) { create(:project, group: root_group) }
before do
root_group.add_owner(user)
target.add_owner(user)
create_list(:issue_customer_relations_contact, 2, :for_issue, issue: create(:issue, project: project))
end
context 'with the same root_ancestor' do
let(:target) { create(:group, parent: root_group) }
it 'retains issue contacts' do
expect { execute_transfer }.not_to change { CustomerRelations::IssueContact.count }
end
end
context 'with a different root_ancestor' do
it 'deletes issue contacts' do
expect { execute_transfer }.to change { CustomerRelations::IssueContact.count }.by(-2)
end
end
end
def project_namespace_in_sync(group)
project.reload
expect(project.namespace).to eq(group)
expect(project.project_namespace.visibility_level).to eq(project.visibility_level)
expect(project.project_namespace.path).to eq(project.path)
expect(project.project_namespace.parent).to eq(project.namespace)
expect(project.project_namespace.traversal_ids).to eq([*project.namespace.traversal_ids, project.project_namespace.id])
end
end
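For reference, the examples above lean on an `execute_transfer` subject and `user`/`group`/`project` lets defined earlier in this spec file, outside this excerpt. A minimal sketch of that assumed setup (the real definitions may differ in detail):

```ruby
# Assumed setup for the transfer examples above; illustrative only.
let_it_be(:user) { create(:user) }
let(:group) { create(:group) }
let(:project) { create(:project, :repository, namespace: user.namespace) }

# Transfers `project` into `group`, acting as `user`.
subject(:execute_transfer) { described_class.new(project, user).execute(group) }
```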
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class RefreshBuildArtifactsSizeStatisticsService
BATCH_SIZE = 500
REFRESH_INTERVAL_SECONDS = 0.1
def execute
refresh = Projects::BuildArtifactsSizeRefresh.process_next_refresh!
return unless refresh&.running?
batch = refresh.next_batch(limit: BATCH_SIZE).to_a
if batch.any?
increments = batch.map do |artifact|
Gitlab::Counters::Increment.new(amount: artifact.size.to_i, ref: artifact.id)
end
Projects::BuildArtifactsSizeRefresh.transaction do
# Mark the refresh ready for another worker to pick up and process the next batch
refresh.requeue!(batch.last.id)
ProjectStatistics.bulk_increment_statistic(refresh.project, :build_artifacts_size, increments)
end
sleep REFRESH_INTERVAL_SECONDS
else
refresh.schedule_finalize!
end
refresh
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::RefreshBuildArtifactsSizeStatisticsService, :clean_gitlab_redis_shared_state, feature_category: :build_artifacts do
let(:service) { described_class.new }
describe '#execute' do
let_it_be(:project, reload: true) { create(:project) }
let_it_be(:artifact_1) { create(:ci_job_artifact, project: project, size: 1, created_at: 14.days.ago) }
let_it_be(:artifact_2) { create(:ci_job_artifact, project: project, size: 2, created_at: 13.days.ago) }
let_it_be(:artifact_3) { create(:ci_job_artifact, project: project, size: nil, created_at: 13.days.ago) }
let_it_be(:artifact_4) { create(:ci_job_artifact, project: project, size: 5, created_at: 12.days.ago) }
# This should not be included in the recalculation as it is created later than the refresh start time
let_it_be(:future_artifact) { create(:ci_job_artifact, project: project, size: 8, created_at: 2.days.from_now) }
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:created,
project: project,
updated_at: 2.days.ago,
refresh_started_at: nil,
last_job_artifact_id: nil
)
end
let(:now) { Time.zone.now }
let(:statistics) { project.statistics }
let(:increment) { Gitlab::Counters::Increment.new(amount: 30) }
around do |example|
freeze_time { example.run }
end
before do
stub_const("#{described_class}::BATCH_SIZE", 3)
stub_const("#{described_class}::REFRESH_INTERVAL_SECONDS", 0)
stats = create(:project_statistics, project: project, build_artifacts_size: 120)
stats.increment_counter(:build_artifacts_size, increment)
end
it 'resets the build artifacts size stats' do
expect { service.execute }.to change { statistics.reload.build_artifacts_size }.from(120).to(0)
end
it 'resets the buffered counter' do
expect { service.execute }
.to change { Gitlab::Counters::BufferedCounter.new(statistics, :build_artifacts_size).get }.to(0)
end
it 'updates the last_job_artifact_id to the ID of the last artifact from the batch' do
expect { service.execute }.to change { refresh.reload.last_job_artifact_id.to_i }.to(artifact_3.id)
end
it 'updates the last_job_artifact_id_on_refresh_start to the ID of the last artifact from the project' do
expect { service.execute }
.to change { refresh.reload.last_job_artifact_id_on_refresh_start.to_i }
.to(project.job_artifacts.last.id)
end
it 'requeues the refresh job' do
service.execute
expect(refresh.reload).to be_pending
end
context 'when an error happens after the recalculation has started' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:pending,
project: project,
last_job_artifact_id: artifact_3.id,
last_job_artifact_id_on_refresh_start: artifact_4.id
)
end
before do
allow(Gitlab::Redis::SharedState).to receive(:with).and_raise(StandardError, 'error')
expect { service.execute }.to raise_error(StandardError)
end
it 'keeps the last_job_artifact_id unchanged' do
expect(refresh.reload.last_job_artifact_id).to eq(artifact_3.id)
end
it 'keeps the last_job_artifact_id_on_refresh_start unchanged' do
expect(refresh.reload.last_job_artifact_id_on_refresh_start).to eq(artifact_4.id)
end
it 'keeps the state of the refresh record at running' do
expect(refresh.reload).to be_running
end
end
context 'when there are no more artifacts to recalculate for the next refresh job' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:pending,
project: project,
updated_at: 2.days.ago,
refresh_started_at: now,
last_job_artifact_id: artifact_4.id
)
end
it 'schedules the refresh to be finalized' do
service.execute
expect(refresh.reload.finalizing?).to be(true)
end
end
end
end
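The service is built to be re-entrant: each call claims at most one refresh, processes a single batch, then either requeues the refresh or schedules finalization. An illustrative driver loop, not GitLab's actual scheduling code (in production a recurring worker invokes it):

```ruby
# Illustrative only: drive the service until nothing is left to process.
service = Projects::RefreshBuildArtifactsSizeStatisticsService.new

loop do
  refresh = service.execute
  # `execute` returns nil when no refresh is ready to be picked up, and a
  # finalizing refresh once the last batch has been counted.
  break if refresh.nil? || refresh.finalizing?
end
```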
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class ForkService < BaseService
def execute(fork_to_project = nil)
forked_project = fork_to_project ? link_existing_project(fork_to_project) : fork_new_project
if forked_project&.saved?
refresh_forks_count
stream_audit_event(forked_project)
end
forked_project
end
def valid_fork_targets(options = {})
@valid_fork_targets ||= ForkTargetsFinder.new(@project, current_user).execute(options)
end
def valid_fork_branch?(branch)
@project.repository.branch_exists?(branch)
end
def valid_fork_target?(namespace = target_namespace)
return true if current_user.admin?
valid_fork_targets.include?(namespace)
end
private
def link_existing_project(fork_to_project)
return if fork_to_project.forked?
build_fork_network_member(fork_to_project)
fork_to_project if link_fork_network(fork_to_project)
end
def fork_new_project
new_project = CreateService.new(current_user, new_fork_params).execute
return new_project unless new_project.persisted?
new_project.project_feature.update!(
@project.project_feature.slice(ProjectFeature::FEATURES.map { |f| "#{f}_access_level" })
)
new_project
end
def new_fork_params
new_params = {
forked_from_project: @project,
visibility_level: target_visibility_level,
description: target_description,
name: target_name,
path: target_path,
shared_runners_enabled: @project.shared_runners_enabled,
namespace_id: target_namespace.id,
fork_network: fork_network,
ci_config_path: @project.ci_config_path,
# We need to set ci_default_git_depth to 0 for the forked project when
# @project.ci_default_git_depth is nil in order to keep the same behaviour
# and not get ProjectCiCdSetting::DEFAULT_GIT_DEPTH set on create
ci_cd_settings_attributes: { default_git_depth: @project.ci_default_git_depth || 0 },
# We need to assign the fork network membership after the project has
# been instantiated to avoid ActiveRecord trying to create it when
# initializing the project, as that would cause a foreign key constraint
# exception.
relations_block: -> (project) { build_fork_network_member(project) },
skip_disk_validation: skip_disk_validation,
external_authorization_classification_label: @project.external_authorization_classification_label,
suggestion_commit_message: @project.suggestion_commit_message,
merge_commit_template: @project.merge_commit_template,
squash_commit_template: @project.squash_commit_template,
import_data: { data: { fork_branch: branch } }
}
if @project.avatar.present? && @project.avatar.image?
new_params[:avatar] = @project.avatar
end
new_params[:mr_default_target_self] = target_mr_default_target_self unless target_mr_default_target_self.nil?
new_params.merge!(@project.object_pool_params)
new_params
end
def allowed_fork?
current_user.can?(:fork_project, @project)
end
def fork_network
@fork_network ||= @project.fork_network || @project.build_root_of_fork_network
end
def build_fork_network_member(fork_to_project)
if allowed_fork?
fork_to_project.build_fork_network_member(
forked_from_project: @project,
fork_network: fork_network
)
else
fork_to_project.errors.add(:forked_from_project_id, 'is forbidden')
end
end
def link_fork_network(fork_to_project)
return if fork_to_project.errors.any?
fork_to_project.fork_network_member.save
end
def refresh_forks_count
Projects::ForksCountService.new(@project).refresh_cache
end
def target_path
@target_path ||= @params[:path] || @project.path
end
def target_name
@target_name ||= @params[:name] || @project.name
end
def target_description
@target_description ||= @params[:description] || @project.description
end
def target_namespace
@target_namespace ||= @params[:namespace] || current_user.namespace
end
def skip_disk_validation
@skip_disk_validation ||= @params[:skip_disk_validation] || false
end
def target_visibility_level
target_level = [@project.visibility_level, target_namespace.visibility_level].min
target_level = [target_level, Gitlab::VisibilityLevel.level_value(params[:visibility])].min if params.key?(:visibility)
Gitlab::VisibilityLevel.closest_allowed_level(target_level)
end
def target_mr_default_target_self
@target_mr_default_target_self ||= params[:mr_default_target_self]
end
def stream_audit_event(forked_project)
# Defined in EE
end
def branch
# We extract branch name from @params[:branches] because the front end
# insists on sending it as 'branches'.
@params[:branches]
end
end
end
Projects::ForkService.prepend_mod
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ForkService, feature_category: :source_code_management do
include ProjectForksHelper
shared_examples 'forks count cache refresh' do
it 'flushes the forks count cache of the source project', :clean_gitlab_redis_cache do
expect(from_project.forks_count).to be_zero
fork_project(from_project, to_user, using_service: true)
BatchLoader::Executor.clear_current
expect(from_project.forks_count).to eq(1)
end
end
context 'when forking a new project' do
describe 'fork by user' do
before do
@from_user = create(:user)
@from_namespace = @from_user.namespace
avatar = fixture_file_upload("spec/fixtures/dk.png", "image/png")
@from_project = create(
:project,
:repository,
creator_id: @from_user.id,
namespace: @from_namespace,
star_count: 107,
avatar: avatar,
description: 'wow such project',
external_authorization_classification_label: 'classification-label'
)
@to_user = create(:user)
@to_namespace = @to_user.namespace
@from_project.add_member(@to_user, :developer)
end
context 'fork project' do
context 'when forker is a guest' do
before do
@guest = create(:user)
@from_project.add_member(@guest, :guest)
end
subject { fork_project(@from_project, @guest, using_service: true) }
it { is_expected.not_to be_persisted }
it { expect(subject.errors[:forked_from_project_id]).to eq(['is forbidden']) }
it 'does not create a fork network' do
expect { subject }.not_to change { @from_project.reload.fork_network }
end
end
it_behaves_like 'forks count cache refresh' do
let(:from_project) { @from_project }
let(:to_user) { @to_user }
end
describe "successfully creates project in the user namespace" do
let(:to_project) { fork_project(@from_project, @to_user, namespace: @to_user.namespace, using_service: true) }
it { expect(to_project).to be_persisted }
it { expect(to_project.errors).to be_empty }
it { expect(to_project.first_owner).to eq(@to_user) }
it { expect(to_project.namespace).to eq(@to_user.namespace) }
it { expect(to_project.star_count).to be_zero }
it { expect(to_project.description).to eq(@from_project.description) }
it { expect(to_project.avatar.file).to be_exists }
it { expect(to_project.ci_config_path).to eq(@from_project.ci_config_path) }
it { expect(to_project.external_authorization_classification_label).to eq(@from_project.external_authorization_classification_label) }
it { expect(to_project.suggestion_commit_message).to eq(@from_project.suggestion_commit_message) }
it { expect(to_project.merge_commit_template).to eq(@from_project.merge_commit_template) }
it { expect(to_project.squash_commit_template).to eq(@from_project.squash_commit_template) }
# This test is here because we had a bug where the from-project lost its
# avatar after being forked.
# https://gitlab.com/gitlab-org/gitlab-foss/issues/26158
it "after forking the from-project still has its avatar" do
# If we do not fork the project first we cannot detect the bug.
expect(to_project).to be_persisted
expect(@from_project.avatar.file).to be_exists
end
it_behaves_like 'forks count cache refresh' do
let(:from_project) { @from_project }
let(:to_user) { @to_user }
end
it 'creates a fork network with the new project and the root project set' do
to_project
fork_network = @from_project.reload.fork_network
expect(fork_network).not_to be_nil
expect(fork_network.root_project).to eq(@from_project)
expect(fork_network.projects).to contain_exactly(@from_project, to_project)
end
it 'imports the repository of the forked project', :sidekiq_might_not_need_inline do
to_project = fork_project(@from_project, @to_user, repository: true, using_service: true)
expect(to_project.empty_repo?).to be_falsy
end
end
context 'creating a fork of a fork' do
let(:from_forked_project) { fork_project(@from_project, @to_user, using_service: true) }
let(:other_namespace) do
group = create(:group)
group.add_owner(@to_user)
group
end
let(:to_project) { fork_project(from_forked_project, @to_user, namespace: other_namespace, using_service: true) }
it 'sets the root of the network to the root project' do
expect(to_project.fork_network.root_project).to eq(@from_project)
end
it 'sets the forked_from_project on the membership' do
expect(to_project.fork_network_member.forked_from_project).to eq(from_forked_project)
end
context 'when the forked project has higher visibility than the root project' do
let(:root_project) { create(:project, :public) }
it 'successfully creates a fork of the fork with correct visibility' do
forked_project = fork_project(root_project, @to_user, using_service: true)
root_project.update!(visibility_level: Gitlab::VisibilityLevel::INTERNAL)
# Forked project visibility is not affected by root project visibility change
expect(forked_project).to have_attributes(visibility_level: Gitlab::VisibilityLevel::PUBLIC)
fork_of_the_fork = fork_project(forked_project, @to_user, namespace: other_namespace, using_service: true)
expect(fork_of_the_fork).to be_valid
expect(fork_of_the_fork).to have_attributes(visibility_level: Gitlab::VisibilityLevel::PUBLIC)
end
end
it_behaves_like 'forks count cache refresh' do
let(:from_project) { from_forked_project }
let(:to_user) { @to_user }
end
end
end
context 'project already exists' do
it "fails due to validation, not transaction failure" do
@existing_project = create(:project, :repository, creator_id: @to_user.id, path: @from_project.path, namespace: @to_namespace)
@to_project = fork_project(@from_project, @to_user, namespace: @to_namespace, using_service: true)
expect(@existing_project).to be_persisted
expect(@to_project).not_to be_persisted
expect(@to_project.errors[:path]).to eq(['has already been taken'])
end
end
context 'repository in legacy storage already exists' do
let(:raw_fake_repo) { Gitlab::Git::Repository.new('default', File.join(@to_user.namespace.full_path, "#{@from_project.path}.git"), nil, nil) }
let(:params) { { namespace: @to_user.namespace, using_service: true } }
before do
stub_application_setting(hashed_storage_enabled: false)
raw_fake_repo.create_repository
end
after do
raw_fake_repo.remove
end
subject { fork_project(@from_project, @to_user, params) }
it 'does not allow creation' do
expect(subject).not_to be_persisted
expect(subject.errors.messages).to have_key(:base)
expect(subject.errors.messages[:base].first).to match('There is already a repository with that name on disk')
end
context 'when repository disk validation is explicitly skipped' do
let(:params) { super().merge(skip_disk_validation: true) }
it 'allows fork project creation' do
expect(subject).to be_persisted
expect(subject.errors.messages).to be_empty
end
end
end
context "CI/CD settings" do
let(:to_project) { fork_project(@from_project, @to_user, using_service: true) }
context "when origin has git depth specified" do
before do
@from_project.update!(ci_default_git_depth: 42)
end
it "inherits default_git_depth from the origin project" do
expect(to_project.ci_default_git_depth).to eq(42)
end
end
context "when origin does not define git depth" do
before do
@from_project.update!(ci_default_git_depth: nil)
end
it "the fork has git depth set to 0" do
expect(to_project.ci_default_git_depth).to eq(0)
end
end
end
context "when project has restricted visibility level" do
context "and only one visibility level is restricted" do
before do
@from_project.update!(visibility_level: Gitlab::VisibilityLevel::INTERNAL)
stub_application_setting(restricted_visibility_levels: [Gitlab::VisibilityLevel::INTERNAL])
end
it "creates fork with lowest level" do
forked_project = fork_project(@from_project, @to_user, using_service: true)
expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
end
end
context "and all visibility levels are restricted" do
before do
stub_application_setting(restricted_visibility_levels: [Gitlab::VisibilityLevel::PUBLIC, Gitlab::VisibilityLevel::INTERNAL, Gitlab::VisibilityLevel::PRIVATE])
end
it "creates fork with private visibility levels" do
forked_project = fork_project(@from_project, @to_user, using_service: true)
expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
end
end
end
context 'when forking is disabled' do
before do
@from_project.project_feature.update_attribute(
:forking_access_level, ProjectFeature::DISABLED)
end
it 'fails' do
to_project = fork_project(@from_project, @to_user, namespace: @to_user.namespace, using_service: true)
expect(to_project.errors[:forked_from_project_id]).to eq(['is forbidden'])
end
end
end
describe 'fork to namespace' do
before do
@group_owner = create(:user)
@developer = create(:user)
@project = create(
:project, :repository,
creator_id: @group_owner.id,
star_count: 777,
description: 'Wow, such a cool project!',
ci_config_path: 'debian/salsa-ci.yml'
)
@group = create(:group)
@group.add_member(@group_owner, GroupMember::OWNER)
@group.add_member(@developer, GroupMember::DEVELOPER)
@project.add_member(@developer, :developer)
@project.add_member(@group_owner, :developer)
@opts = { namespace: @group, using_service: true }
end
context 'fork project for group' do
it 'group owner successfully forks project into the group' do
to_project = fork_project(@project, @group_owner, @opts)
expect(to_project).to be_persisted
expect(to_project.errors).to be_empty
expect(to_project.first_owner).to eq(@group_owner)
expect(to_project.namespace).to eq(@group)
expect(to_project.name).to eq(@project.name)
expect(to_project.path).to eq(@project.path)
expect(to_project.description).to eq(@project.description)
expect(to_project.ci_config_path).to eq(@project.ci_config_path)
expect(to_project.star_count).to be_zero
end
end
context 'fork project for group when user not owner' do
it 'group developer fails to fork project into the group' do
to_project = fork_project(@project, @developer, @opts)
expect(to_project.errors[:namespace]).to eq(['is not valid'])
end
end
context 'project already exists in group' do
it 'fails due to validation, not transaction failure' do
existing_project = create(:project, :repository, path: @project.path, namespace: @group)
to_project = fork_project(@project, @group_owner, @opts)
expect(existing_project.persisted?).to be_truthy
expect(to_project.errors[:path]).to eq(['has already been taken'])
end
end
context 'when the namespace has a lower visibility level than the project' do
it 'creates the project with the lower visibility level' do
public_project = create(:project, :public)
private_group = create(:group, :private)
group_owner = create(:user)
private_group.add_owner(group_owner)
forked_project = fork_project(public_project, group_owner, namespace: private_group, using_service: true)
expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
end
end
end
describe 'fork with optional attributes' do
let(:public_project) { create(:project, :public) }
it 'sets optional attributes to specified values' do
forked_project = fork_project(
public_project,
nil,
namespace: public_project.namespace,
path: 'forked',
name: 'My Fork',
description: 'Description',
visibility: 'internal',
using_service: true
)
expect(forked_project.path).to eq('forked')
expect(forked_project.name).to eq('My Fork')
expect(forked_project.description).to eq('Description')
expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::INTERNAL)
end
it 'sets visibility level to private if an unknown visibility is requested' do
forked_project = fork_project(public_project, nil, using_service: true, visibility: 'unknown')
expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
end
it 'sets visibility level to project visibility level if requested visibility is greater' do
private_project = create(:project, :private)
forked_project = fork_project(private_project, nil, using_service: true, visibility: 'public')
expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
end
it 'sets visibility level to target namespace visibility level if requested visibility is greater' do
private_group = create(:group, :private)
forked_project = fork_project(public_project, nil, namespace: private_group, using_service: true, visibility: 'public')
expect(forked_project.visibility_level).to eq(Gitlab::VisibilityLevel::PRIVATE)
end
it 'copies project features visibility settings to the fork', :aggregate_failures do
attrs = ProjectFeature::FEATURES.to_h do |f|
["#{f}_access_level", ProjectFeature::PRIVATE]
end
public_project.project_feature.update!(attrs)
user = create(:user, developer_projects: [public_project])
forked_project = described_class.new(public_project, user).execute
expect(forked_project.project_feature.slice(attrs.keys)).to eq(attrs)
end
end
end
context 'when a project is already forked' do
it 'creates a new pool repository after the project is moved to a new shard' do
project = create(:project, :public, :repository)
fork_before_move = fork_project(project, nil, using_service: true)
# Stub everything required to move a project to a Gitaly shard that does not exist
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('default').and_call_original
allow(Gitlab::GitalyClient).to receive(:filesystem_id).with('test_second_storage').and_return(SecureRandom.uuid)
stub_storage_settings('test_second_storage' => {})
allow_any_instance_of(Gitlab::Git::Repository).to receive(:create_repository)
.and_return(true)
allow_any_instance_of(Gitlab::Git::Repository).to receive(:replicate)
allow_any_instance_of(Gitlab::Git::Repository).to receive(:checksum)
.and_return(::Gitlab::Git::BLANK_SHA)
allow_next_instance_of(Gitlab::Git::ObjectPool) do |object_pool|
allow(object_pool).to receive(:link)
end
storage_move = create(
:project_repository_storage_move,
:scheduled,
container: project,
destination_storage_name: 'test_second_storage'
)
Projects::UpdateRepositoryStorageService.new(storage_move).execute
fork_after_move = fork_project(project.reload, nil, using_service: true)
pool_repository_before_move = PoolRepository.joins(:shard)
.find_by(source_project: project, shards: { name: 'default' })
pool_repository_after_move = PoolRepository.joins(:shard)
.find_by(source_project: project, shards: { name: 'test_second_storage' })
expect(fork_before_move.pool_repository).to eq(pool_repository_before_move)
expect(fork_after_move.pool_repository).to eq(pool_repository_after_move)
end
end
context 'when forking with object pools' do
let(:fork_from_project) { create(:project, :repository, :public) }
let(:forker) { create(:user) }
context 'when no pool exists' do
it 'creates a new object pool' do
forked_project = fork_project(fork_from_project, forker, using_service: true)
expect(forked_project.pool_repository).to eq(fork_from_project.pool_repository)
end
end
context 'when a pool already exists' do
let!(:pool_repository) { create(:pool_repository, source_project: fork_from_project) }
it 'joins the object pool' do
forked_project = fork_project(fork_from_project, forker, using_service: true)
expect(forked_project.pool_repository).to eq(fork_from_project.pool_repository)
end
end
end
context 'when linking fork to an existing project' do
let(:fork_from_project) { create(:project, :public) }
let(:fork_to_project) { create(:project, :public) }
let(:user) do
create(:user).tap { |u| fork_to_project.add_maintainer(u) }
end
subject { described_class.new(fork_from_project, user) }
def forked_from_project(project)
project.fork_network_member&.forked_from_project
end
context 'if project is already forked' do
it 'does not create fork relation' do
allow(fork_to_project).to receive(:forked?).and_return(true)
expect(forked_from_project(fork_to_project)).to be_nil
expect(subject.execute(fork_to_project)).to be_nil
expect(forked_from_project(fork_to_project)).to be_nil
end
end
context 'if project is not forked' do
it 'creates fork relation' do
expect(fork_to_project.forked?).to be_falsy
expect(forked_from_project(fork_to_project)).to be_nil
subject.execute(fork_to_project)
fork_to_project.reload
expect(fork_to_project.forked?).to be true
expect(forked_from_project(fork_to_project)).to eq fork_from_project
expect(fork_to_project.forked_from_project).to eq fork_from_project
end
it 'flushes the forks count cache of the source project' do
expect(fork_from_project.forks_count).to be_zero
subject.execute(fork_to_project)
BatchLoader::Executor.clear_current
expect(fork_from_project.forks_count).to eq(1)
end
context 'if the fork is not allowed' do
let(:fork_from_project) { create(:project, :private) }
it 'does not delete the LFS objects' do
create(:lfs_objects_project, project: fork_to_project)
expect { subject.execute(fork_to_project) }
.not_to change { fork_to_project.lfs_objects_projects.size }
end
end
end
end
describe '#valid_fork_targets' do
let(:finder_mock) { instance_double('ForkTargetsFinder', execute: ['finder_return_value']) }
let(:current_user) { instance_double('User') }
let(:project) { instance_double('Project') }
before do
allow(ForkTargetsFinder).to receive(:new).with(project, current_user).and_return(finder_mock)
end
it 'returns whatever finder returns' do
expect(described_class.new(project, current_user).valid_fork_targets).to eq ['finder_return_value']
end
end
describe '#valid_fork_branch?' do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :small_repo, creator_id: user.id) }
let_it_be(:branch) { nil }
subject { described_class.new(project, user).valid_fork_branch?(branch) }
context 'when branch exists' do
let(:branch) { project.default_branch_or_main }
it { is_expected.to be_truthy }
end
context 'when branch does not exist' do
let(:branch) { 'branch-that-does-not-exist' }
it { is_expected.to be_falsey }
end
end
describe '#valid_fork_target?' do
let(:project) { Project.new }
let(:params) { {} }
context 'when target is not passed' do
subject { described_class.new(project, user, params).valid_fork_target? }
context 'when current user is an admin' do
let(:user) { build(:user, :admin) }
it { is_expected.to be_truthy }
end
context 'when current_user is not an admin' do
let(:user) { create(:user) }
let(:finder_mock) { instance_double('ForkTargetsFinder', execute: [user.namespace]) }
let(:project) { create(:project) }
before do
allow(ForkTargetsFinder).to receive(:new).with(project, user).and_return(finder_mock)
end
context 'when target namespace is in valid fork targets' do
let(:params) { { namespace: user.namespace } }
it { is_expected.to be_truthy }
end
context 'when target namespace is not in valid fork targets' do
let(:params) { { namespace: create(:group) } }
it { is_expected.to be_falsey }
end
end
end
context 'when target is passed' do
let(:target) { create(:group) }
subject { described_class.new(project, user, params).valid_fork_target?(target) }
context 'when current user is an admin' do
let(:user) { build(:user, :admin) }
it { is_expected.to be_truthy }
end
context 'when current user is not an admin' do
let(:user) { create(:user) }
before do
allow(ForkTargetsFinder).to receive(:new).with(project, user).and_return(finder_mock)
end
context 'when target namespace is in valid fork targets' do
let(:finder_mock) { instance_double('ForkTargetsFinder', execute: [target]) }
it { is_expected.to be_truthy }
end
context 'when target namespace is not in valid fork targets' do
let(:finder_mock) { instance_double('ForkTargetsFinder', execute: [create(:group)]) }
it { is_expected.to be_falsey }
end
end
end
end
end
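Most of the scenarios above go through `fork_project(..., using_service: true)` from `ProjectForksHelper`. As a rough mental model (the helper's real internals are an assumption here, and it also handles repository creation and other options), that path reduces to calling the service directly:

```ruby
# Rough equivalent of fork_project(project, user, using_service: true);
# illustrative only.
def fork_project_via_service(project, user, params = {})
  Projects::ForkService.new(project, user, params).execute
end
```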
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class UnlinkForkService < BaseService
# Close existing MRs coming from the project and remove the project from the fork network
def execute(refresh_statistics: true)
fork_network = @project.fork_network
forked_from = @project.forked_from_project
return unless fork_network
log_info(message: "UnlinkForkService: Unlinking fork network", fork_network_id: fork_network.id)
merge_requests = fork_network
.merge_requests
.opened
.from_and_to_forks(@project)
merge_requests.find_each do |mr|
::MergeRequests::CloseService.new(project: @project, current_user: @current_user).execute(mr)
log_info(message: "UnlinkForkService: Closed merge request", merge_request_id: mr.id)
end
Project.transaction do
# Get out of the fork network as a member and
# remove references from all its direct forks.
@project.fork_network_member.destroy
@project.forked_to_members.update_all(forked_from_project_id: nil)
# The project is not necessarily a fork, so update the fork network originating
# from this project
if fork_network = @project.root_of_fork_network
fork_network.update(root_project: nil, deleted_root_project_name: @project.full_name)
end
@project.leave_pool_repository
end
# rubocop: disable Cop/InBatches
Project.uncached do
@project.forked_to_members.in_batches do |fork_relation|
fork_relation.pluck(:id).each do |fork_id| # rubocop: disable CodeReuse/ActiveRecord
log_info(message: "UnlinkForkService: Unlinked fork of root_project", project_id: @project.id, forked_project_id: fork_id)
end
end
end
# rubocop: enable Cop/InBatches
if Feature.enabled?(:refresh_statistics_on_unlink_fork, @project.namespace) && refresh_statistics
ProjectCacheWorker.perform_async(project.id, [], [:repository_size])
end
# When the project leaving the network is a node with both a parent
# and children, the parent and the node both need a cache refresh.
[forked_from, @project].compact.each do |project|
refresh_forks_count(project)
end
end
private
def refresh_forks_count(project)
Projects::ForksCountService.new(project).refresh_cache
end
end
end
Projects::UnlinkForkService.prepend_mod_with('Projects::UnlinkForkService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::UnlinkForkService, :use_clean_rails_memory_store_caching, feature_category: :source_code_management do
include ProjectForksHelper
subject { described_class.new(forked_project, user) }
let(:project) { create(:project, :public) }
let!(:forked_project) { fork_project(project, user) }
let(:user) { create(:user) }
context 'with opened merge requests on the source project' do
let(:merge_request) { create(:merge_request, source_project: forked_project, target_project: forked_project.forked_from_project) }
let(:merge_request2) { create(:merge_request, source_project: forked_project, target_project: fork_project(project)) }
let(:merge_request_in_fork) { create(:merge_request, source_project: forked_project, target_project: forked_project) }
let(:mr_close_service) { MergeRequests::CloseService.new(project: forked_project, current_user: user) }
before do
allow(MergeRequests::CloseService).to receive(:new)
.with(project: forked_project, current_user: user)
.and_return(mr_close_service)
end
it 'closes all pending merge requests' do
expect(mr_close_service).to receive(:execute).with(merge_request)
expect(mr_close_service).to receive(:execute).with(merge_request2)
subject.execute
end
it 'does not close merge requests for the project being unlinked' do
expect(mr_close_service).not_to receive(:execute).with(merge_request_in_fork)
subject.execute
end
end
it 'removes the link to the fork network' do
expect(forked_project.fork_network_member).to be_present
expect(forked_project.fork_network).to be_present
subject.execute
forked_project.reload
expect(forked_project.fork_network_member).to be_nil
expect(forked_project.reload.fork_network).to be_nil
end
it 'refreshes the forks count cache of the source project' do
source = forked_project.forked_from_project
expect(source.forks_count).to eq(1)
subject.execute
BatchLoader::Executor.clear_current
expect(source.forks_count).to be_zero
end
it 'refreshes the project statistics of the forked project' do
expect(ProjectCacheWorker).to receive(:perform_async).with(forked_project.id, [], [:repository_size])
subject.execute
end
it 'does not refresh project statistics when refresh_statistics is false' do
expect(ProjectCacheWorker).not_to receive(:perform_async)
subject.execute(refresh_statistics: false)
end
it 'does not refresh project statistics when the feature flag is disabled' do
stub_feature_flags(refresh_statistics_on_unlink_fork: false)
expect(ProjectCacheWorker).not_to receive(:perform_async)
subject.execute
end
context 'when the original project was deleted' do
it 'does not fail when the original project is deleted' do
source = forked_project.forked_from_project
source.destroy!
forked_project.reload
expect { subject.execute }.not_to raise_error
end
end
context 'when given project is a source of forks' do
let!(:forked_project_2) { fork_project(project, user) }
let!(:fork_of_fork) { fork_project(forked_project, user) }
subject { described_class.new(project, user) }
context 'with opened merge requests from fork back to root project' do
let!(:merge_request) { create(:merge_request, source_project: project, target_project: forked_project) }
let!(:merge_request2) { create(:merge_request, source_project: project, target_project: fork_project(project)) }
let!(:merge_request_in_fork) { create(:merge_request, source_project: forked_project, target_project: forked_project) }
let(:mr_close_service) { MergeRequests::CloseService.new(project: project, current_user: user) }
before do
allow(MergeRequests::CloseService).to receive(:new)
.with(project: project, current_user: user)
.and_return(mr_close_service)
end
it 'closes all pending merge requests' do
expect(mr_close_service).to receive(:execute).with(merge_request)
expect(mr_close_service).to receive(:execute).with(merge_request2)
subject.execute
end
it 'does not close merge requests that do not come from the project being unlinked' do
expect(mr_close_service).not_to receive(:execute).with(merge_request_in_fork)
subject.execute
end
end
it 'removes its link to the fork network and updates direct network members' do
expect(project.fork_network_member).to be_present
expect(project.fork_network).to be_present
expect(project.forked_to_members.count).to eq(2)
expect(forked_project.forked_to_members.count).to eq(1)
expect(fork_of_fork.forked_to_members.count).to eq(0)
subject.execute
project.reload
forked_project.reload
fork_of_fork.reload
expect(project.fork_network_member).to be_nil
expect(project.fork_network).to be_nil
expect(forked_project.fork_network).to have_attributes(
root_project_id: nil,
deleted_root_project_name: project.full_name
)
expect(project.forked_to_members.count).to eq(0)
expect(forked_project.forked_to_members.count).to eq(1)
expect(fork_of_fork.forked_to_members.count).to eq(0)
end
it 'refreshes the forks count cache of the given project' do
expect(project.forks_count).to eq(2)
subject.execute
BatchLoader::Executor.clear_current
expect(project.forks_count).to be_zero
end
context 'and is a node with a parent' do
subject { described_class.new(forked_project, user) }
context 'with opened merge requests from and to given project' do
let!(:mr_from_parent) { create(:merge_request, source_project: project, target_project: forked_project) }
let!(:mr_to_parent) { create(:merge_request, source_project: forked_project, target_project: project) }
let!(:mr_to_child) { create(:merge_request, source_project: forked_project, target_project: fork_of_fork) }
let!(:mr_from_child) { create(:merge_request, source_project: fork_of_fork, target_project: forked_project) }
let!(:merge_request_in_fork) { create(:merge_request, source_project: forked_project, target_project: forked_project) }
let(:mr_close_service) { MergeRequests::CloseService.new(project: forked_project, current_user: user) }
before do
allow(MergeRequests::CloseService).to receive(:new)
.with(project: forked_project, current_user: user)
.and_return(mr_close_service)
end
it 'closes all pending merge requests' do
merge_requests = [mr_from_parent, mr_to_parent, mr_from_child, mr_to_child]
merge_requests.each do |mr|
expect(mr_close_service).to receive(:execute).with(mr).and_call_original
end
subject.execute
merge_requests = MergeRequest.where(id: merge_requests)
expect(merge_requests).to all(have_attributes(state: 'closed'))
end
it 'does not close merge requests which do not come from the project being unlinked' do
expect(mr_close_service).not_to receive(:execute).with(merge_request_in_fork)
subject.execute
end
end
it 'refreshes the forks count cache of the parent and the given project' do
expect(project.forks_count).to eq(2)
expect(forked_project.forks_count).to eq(1)
subject.execute
BatchLoader::Executor.clear_current
expect(project.forks_count).to eq(1)
expect(forked_project.forks_count).to eq(0)
end
it 'removes its link to the fork network and updates direct network members' do
expect(project.fork_network).to be_present
expect(forked_project.fork_network).to be_present
expect(fork_of_fork.fork_network).to be_present
expect(project.forked_to_members.count).to eq(2)
expect(forked_project.forked_to_members.count).to eq(1)
expect(fork_of_fork.forked_to_members.count).to eq(0)
subject.execute
project.reload
forked_project.reload
fork_of_fork.reload
expect(project.fork_network).to be_present
expect(forked_project.fork_network).to be_nil
expect(fork_of_fork.fork_network).to be_present
expect(project.forked_to_members.count).to eq(1) # 1 child is gone
expect(forked_project.forked_to_members.count).to eq(0)
expect(fork_of_fork.forked_to_members.count).to eq(0)
end
end
end
context 'a project with a pool repository' do
let(:project) { create(:project, :public, :repository) }
let!(:pool_repository) { create(:pool_repository, :ready, source_project: project) }
subject { described_class.new(project, user) }
it 'leaves the pool repository when unlinked' do
expect { subject.execute }.to change { project.reload.has_pool_repository? }.from(true).to(false)
end
end
context 'when given project is not part of a fork network' do
let!(:project_without_forks) { create(:project, :public) }
subject { described_class.new(project_without_forks, user) }
it 'does not raise errors' do
expect { subject.execute }.not_to raise_error
end
end
end
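For context, a caller invokes the service with the project being unlinked and the acting user; the sketch below is illustrative and assumes permission checks happen before this point:

```ruby
# Illustrative invocation; authorization is assumed to be handled upstream.
Projects::UnlinkForkService.new(project, current_user).execute

# Skip the repository-size cache refresh, e.g. when the project is about
# to be deleted anyway:
Projects::UnlinkForkService.new(project, current_user).execute(refresh_statistics: false)
```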
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class for getting and caching the number of forks of a project.
class ForksCountService < Projects::CountService
def cache_key_name
'forks_count'
end
# rubocop: disable CodeReuse/ActiveRecord
def self.query(project_ids)
ForkNetworkMember.where(forked_from_project: project_ids)
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ForksCountService, :use_clean_rails_memory_store_caching, feature_category: :source_code_management do
let(:project) { build(:project) }
subject { described_class.new(project) }
it_behaves_like 'a counter caching service'
describe '#count' do
it 'returns the number of forks' do
allow(subject).to receive(:uncached_count).and_return(1)
expect(subject.count).to eq(1)
end
end
end
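The interesting behavior lives in the inherited `Projects::CountService` interface, which the shared 'a counter caching service' examples cover. A short usage sketch, assuming the conventional `count`/`refresh_cache`/`delete_cache` interface of that base class:

```ruby
# Usage sketch; the method names come from the Projects::CountService base
# class and are an assumption here.
service = Projects::ForksCountService.new(project)

service.count          # cached count, falling back to the ForkNetworkMember query
service.refresh_cache  # recompute and store the count
service.delete_cache   # drop the cached value
```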
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
# The CleanupService removes data from the project repository following a
# BFG rewrite: https://rtyley.github.io/bfg-repo-cleaner/
#
# Before executing this service, all refs rewritten by BFG should have been
# pushed to the repository
class CleanupService < BaseService
NoUploadError = StandardError.new("Couldn't find uploaded object map")
include Gitlab::Utils::StrongMemoize
class << self
def enqueue(project, current_user, bfg_object_map)
Projects::UpdateService.new(project, current_user, bfg_object_map: bfg_object_map).execute.tap do |result|
next unless result[:status] == :success
project.set_repository_read_only!
RepositoryCleanupWorker.perform_async(project.id, current_user.id)
end
rescue Project::RepositoryReadOnlyError => err
{ status: :error, message: (_('Failed to make repository read-only. %{reason}') % { reason: err.message }) }
end
def cleanup_after(project)
project.bfg_object_map.remove!
project.set_repository_writable!
end
end
# Attempt to clean up the project following the push. Warning: this is
# destructive!
#
# The project's bfg_object_map upload is a BFG object map file. It contains
# a line per rewritten object, with the old and new SHAs space-separated. It
# can be used to update or remove content that references the objects that
# BFG has altered
def execute
apply_bfg_object_map!
# Remove older objects that are no longer referenced
Projects::GitGarbageCollectWorker.new.perform(project.id, :prune, "project_cleanup:gc:#{project.id}")
# The cache may now be inaccurate, and holding onto it could prevent
# bugs assuming the presence of some object from manifesting for some
# time. Better to feel the pain immediately.
project.repository.expire_all_method_caches
self.class.cleanup_after(project)
end
private
def apply_bfg_object_map!
raise NoUploadError unless project.bfg_object_map.exists?
project.bfg_object_map.open do |io|
repository_cleaner.apply_bfg_object_map_stream(io) do |response|
cleanup_diffs(response)
end
end
end
def cleanup_diffs(response)
old_commit_shas = extract_old_commit_shas(response.entries)
ApplicationRecord.transaction do
cleanup_merge_request_diffs(old_commit_shas)
cleanup_note_diff_files(old_commit_shas)
end
end
def extract_old_commit_shas(batch)
batch.lazy.select { |entry| entry.type == :COMMIT }.map(&:old_oid).force
end
def cleanup_merge_request_diffs(old_commit_shas)
merge_request_diffs = MergeRequestDiff
.by_project_id(project.id)
.by_commit_sha(old_commit_shas)
# It's important to run the ActiveRecord callbacks here
merge_request_diffs.destroy_all # rubocop:disable Cop/DestroyAll
# TODO: ensure the highlight cache is removed immediately. It's too hard
# to calculate the Redis keys at present.
#
# https://gitlab.com/gitlab-org/gitlab-foss/issues/61115
end
def cleanup_note_diff_files(old_commit_shas)
# Pluck the IDs instead of running the query twice to ensure we clear the
# cache for exactly the note diffs we remove
ids = NoteDiffFile
.referencing_sha(old_commit_shas, project_id: project.id)
.pluck_primary_key
NoteDiffFile.id_in(ids).delete_all
# A highlighted version of the diff is stored in redis. Remove it now.
Gitlab::DiscussionsDiff::HighlightCache.clear_multiple(ids)
end
def repository_cleaner
@repository_cleaner ||= Gitlab::Git::RepositoryCleaner.new(repository.raw)
end
end
end
Projects::CleanupService.prepend_mod_with('Projects::CleanupService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::CleanupService, feature_category: :source_code_management do
subject(:service) { described_class.new(project) }
describe '.enqueue' do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let(:object_map_file) { fixture_file_upload('spec/fixtures/bfg_object_map.txt') }
subject(:enqueue) { described_class.enqueue(project, user, object_map_file) }
it 'makes the repository read-only' do
expect { enqueue }
.to change(project, :repository_read_only?)
.from(false)
.to(true)
end
it 'sets the bfg_object_map of the project' do
enqueue
expect(project.bfg_object_map.read).to eq(object_map_file.read)
end
it 'enqueues a RepositoryCleanupWorker' do
enqueue
expect(RepositoryCleanupWorker.jobs.count).to eq(1)
end
it 'returns success' do
expect(enqueue[:status]).to eq(:success)
end
it 'returns an error if making the repository read-only fails' do
project.set_repository_read_only!
expect(enqueue[:status]).to eq(:error)
end
it 'returns an error if updating the project fails' do
expect_next_instance_of(Projects::UpdateService) do |service|
expect(service).to receive(:execute).and_return(status: :error)
end
expect(enqueue[:status]).to eq(:error)
expect(project.reload.repository_read_only?).to be_falsy
end
end
describe '.cleanup_after' do
let(:project) { create(:project, :repository, bfg_object_map: fixture_file_upload('spec/fixtures/bfg_object_map.txt')) }
subject(:cleanup_after) { described_class.cleanup_after(project) }
before do
project.set_repository_read_only!
end
it 'sets the repository read-write' do
expect { cleanup_after }.to change(project, :repository_read_only?).from(true).to(false)
end
it 'removes the BFG object map' do
cleanup_after
expect(project.bfg_object_map).not_to be_exist
end
end
describe '#execute' do
let(:project) { create(:project, :repository, bfg_object_map: fixture_file_upload('spec/fixtures/bfg_object_map.txt')) }
let(:object_map) { project.bfg_object_map }
let(:cleaner) { service.__send__(:repository_cleaner) }
before do
project.set_repository_read_only!
end
it 'runs the apply_bfg_object_map_stream gitaly RPC' do
expect(cleaner).to receive(:apply_bfg_object_map_stream).with(kind_of(IO))
service.execute
end
it 'runs garbage collection on the repository' do
expect_next_instance_of(Projects::GitGarbageCollectWorker) do |worker|
expect(worker).to receive(:perform).with(project.id, :prune, "project_cleanup:gc:#{project.id}")
end
service.execute
end
it 'clears the repository cache' do
expect(project.repository).to receive(:expire_all_method_caches)
service.execute
end
it 'removes the object map file' do
service.execute
expect(object_map.exists?).to be_falsy
end
it 'makes the repository read-write again' do
expect { service.execute }
.to change(project, :repository_read_only?)
.from(true)
.to(false)
end
context 'with a tainted merge request diff' do
let(:merge_request) { create(:merge_request, source_project: project, target_project: project) }
let(:diff) { merge_request.merge_request_diff }
let(:entry) { build_entry(diff.commits.first.id) }
before do
allow(cleaner)
.to receive(:apply_bfg_object_map_stream)
.and_yield(Gitaly::ApplyBfgObjectMapStreamResponse.new(entries: [entry]))
end
it 'removes the tainted commit from the database' do
service.execute
expect(MergeRequestDiff.exists?(diff.id)).to be_falsy
end
it 'ignores non-commit responses from Gitaly' do
entry.type = :UNKNOWN
service.execute
expect(MergeRequestDiff.exists?(diff.id)).to be_truthy
end
end
context 'with a tainted diff note' do
let(:diff_note) { create(:diff_note_on_commit, project: project) }
let(:note_diff_file) { diff_note.note_diff_file }
let(:entry) { build_entry(diff_note.commit_id) }
let(:highlight_cache) { Gitlab::DiscussionsDiff::HighlightCache }
let(:cache_id) { note_diff_file.id }
before do
allow(cleaner)
.to receive(:apply_bfg_object_map_stream)
.and_yield(Gitaly::ApplyBfgObjectMapStreamResponse.new(entries: [entry]))
end
it 'removes the tainted commit from the database' do
service.execute
expect(NoteDiffFile.exists?(note_diff_file.id)).to be_falsy
end
it 'removes the highlight cache from redis' do
write_cache(highlight_cache, cache_id, [{}])
expect(read_cache(highlight_cache, cache_id)).not_to be_nil
service.execute
expect(read_cache(highlight_cache, cache_id)).to be_nil
end
it 'ignores non-commit responses from Gitaly' do
entry.type = :UNKNOWN
service.execute
expect(NoteDiffFile.exists?(note_diff_file.id)).to be_truthy
end
end
it 'raises an error if no object map can be found' do
object_map.remove!
expect { service.execute }.to raise_error(described_class::NoUploadError)
end
end
def build_entry(old_oid)
Gitaly::ApplyBfgObjectMapStreamResponse::Entry.new(
type: :COMMIT,
old_oid: old_oid,
new_oid: Gitlab::Git::BLANK_SHA
)
end
def read_cache(cache, key)
cache.read_multiple([key]).first
end
def write_cache(cache, key, value)
cache.write_multiple(key => value)
end
end
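End to end, cleanup is a two-step flow: `.enqueue` stores the map, flips the repository read-only and schedules a worker, which later calls `#execute`. A condensed sketch, with `bfg_object_map` standing in for a real uploaded BFG object-map file:

```ruby
# Condensed sketch of the flow; error handling omitted, and the upload
# object is assumed to be a valid BFG object map.
result = Projects::CleanupService.enqueue(project, current_user, bfg_object_map)

if result[:status] == :success
  # RepositoryCleanupWorker eventually runs, roughly doing:
  Projects::CleanupService.new(project).execute
  # ...which applies the object map, prunes the repository, expires caches
  # and makes the repository writable again via cleanup_after.
end
```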
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
class UpdateRemoteMirrorService < BaseService
include Gitlab::Utils::StrongMemoize
MAX_TRIES = 3
def execute(remote_mirror, tries)
return success unless remote_mirror.enabled?
# Blocked URLs are a hard failure, no need to attempt to retry
if Gitlab::UrlBlocker.blocked_url?(normalized_url(remote_mirror.url), schemes: Project::VALID_MIRROR_PROTOCOLS)
hard_retry_or_fail(remote_mirror, _('The remote mirror URL is invalid.'), tries)
return error(remote_mirror.last_error)
end
update_mirror(remote_mirror)
success
rescue Gitlab::Git::CommandError => e
# This happens if one of the gitaly calls above fail, for example when
# branches have diverged, or the pre-receive hook fails.
hard_retry_or_fail(remote_mirror, e.message, tries)
error(e.message)
rescue StandardError => e
remote_mirror.hard_fail!(e.message)
raise e
end
private
def normalized_url(url)
strong_memoize(:normalized_url) do
CGI.unescape(Gitlab::UrlSanitizer.sanitize(url))
end
end
def update_mirror(remote_mirror)
remote_mirror.update_start!
# LFS objects must be sent first, or the push has dangling pointers
lfs_status = send_lfs_objects!(remote_mirror)
response = remote_mirror.update_repository
failed, failure_message = failure_status(lfs_status, response, remote_mirror)
# When the issue https://gitlab.com/gitlab-org/gitlab/-/issues/349262 is closed,
# we can move this block within failure_status.
if failed
remote_mirror.mark_as_failed!(failure_message)
else
remote_mirror.update_finish!
end
end
def failure_status(lfs_status, response, remote_mirror)
message = ''
failed = false
lfs_sync_failed = false
if lfs_status&.dig(:status) == :error
lfs_sync_failed = true
message += "Error synchronizing LFS files:"
message += "\n\n#{lfs_status[:message]}\n\n"
failed = Feature.enabled?(:remote_mirror_fail_on_lfs, project)
end
if response.divergent_refs.any?
message += "Some refs have diverged and have not been updated on the remote:"
message += "\n\n#{response.divergent_refs.join("\n")}"
failed = true
end
if message.present?
Gitlab::AppJsonLogger.info(
message: "Error synching remote mirror",
project_id: project.id,
project_path: project.full_path,
remote_mirror_id: remote_mirror.id,
lfs_sync_failed: lfs_sync_failed,
divergent_ref_list: response.divergent_refs
)
end
[failed, message]
end
def send_lfs_objects!(remote_mirror)
return unless project.lfs_enabled?
# TODO: Support LFS sync over SSH
# https://gitlab.com/gitlab-org/gitlab/-/issues/249587
return unless %r{\Ahttps?://}i.match?(remote_mirror.url)
return unless remote_mirror.password_auth?
Lfs::PushService.new(
project,
current_user,
url: remote_mirror.bare_url,
credentials: remote_mirror.credentials
).execute
end
def hard_retry_or_fail(mirror, message, tries)
if tries < MAX_TRIES
mirror.hard_retry!(message)
else
# It's not likely we'll be able to recover from this ourselves, so we'll
# notify the users of the problem, and don't trigger any sidekiq retries.
# Instead, we'll wait for the next change to try the push again, or until
# a user manually retries.
mirror.hard_fail!(message)
end
end
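# Illustrative only (hypothetical arguments): with MAX_TRIES = 3,
#   hard_retry_or_fail(mirror, 'push failed', 2) # tries < MAX_TRIES  => mirror.hard_retry!('push failed')
#   hard_retry_or_fail(mirror, 'push failed', 3) # tries == MAX_TRIES => mirror.hard_fail!('push failed')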
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::UpdateRemoteMirrorService, feature_category: :source_code_management do
let_it_be(:project) { create(:project, :repository, lfs_enabled: true) }
let_it_be(:remote_project) { create(:forked_project_with_submodules) }
let_it_be(:remote_mirror) { create(:remote_mirror, project: project, enabled: true) }
subject(:service) { described_class.new(project, project.creator) }
describe '#execute' do
let(:retries) { 0 }
subject(:execute!) { service.execute(remote_mirror, retries) }
before do
project.repository.add_branch(project.first_owner, 'existing-branch', 'master')
allow(remote_mirror)
.to receive(:update_repository)
.and_return(double(divergent_refs: []))
end
it 'does not fetch the remote repository' do
# See https://gitlab.com/gitlab-org/gitaly/-/issues/2670
expect(project.repository).not_to receive(:fetch_remote)
execute!
end
it 'marks the mirror as started when beginning' do
expect(remote_mirror).to receive(:update_start!).and_call_original
execute!
end
it 'marks the mirror as successfully finished' do
result = execute!
expect(result[:status]).to eq(:success)
expect(remote_mirror).to be_finished
end
it 'marks the mirror as failed and raises the error when an unexpected error occurs' do
allow(remote_mirror).to receive(:update_repository).and_raise('Badly broken')
expect { execute! }.to raise_error(/Badly broken/)
expect(remote_mirror).to be_failed
expect(remote_mirror.last_error).to include('Badly broken')
end
context 'when the URL is blocked' do
before do
allow(Gitlab::UrlBlocker).to receive(:blocked_url?).and_return(true)
end
it 'hard retries and returns error status' do
expect(execute!).to eq(status: :error, message: 'The remote mirror URL is invalid.')
expect(remote_mirror).to be_to_retry
end
context 'when retries are exceeded' do
let(:retries) { 4 }
it 'hard fails and returns error status' do
expect(execute!).to eq(status: :error, message: 'The remote mirror URL is invalid.')
expect(remote_mirror).to be_failed
end
end
end
context "when given URLs containing escaped elements" do
it_behaves_like "URLs containing escaped elements return expected status" do
let(:result) { execute! }
before do
allow(remote_mirror).to receive(:url).and_return(url)
end
end
end
context 'when the update fails because of a `Gitlab::Git::CommandError`' do
before do
allow(remote_mirror).to receive(:update_repository)
.and_raise(Gitlab::Git::CommandError.new('update failed'))
end
it 'wraps `Gitlab::Git::CommandError`s in a service error' do
expect(execute!).to eq(status: :error, message: 'update failed')
end
it 'marks the mirror as to be retried' do
execute!
expect(remote_mirror).to be_to_retry
expect(remote_mirror.last_error).to include('update failed')
end
it "marks the mirror as failed after #{described_class::MAX_TRIES} tries" do
service.execute(remote_mirror, described_class::MAX_TRIES)
expect(remote_mirror).to be_failed
expect(remote_mirror.last_error).to include('update failed')
end
end
context 'when there are divergent refs' do
it 'marks the mirror as failed and sets an error message' do
response = double(divergent_refs: %w[refs/heads/master refs/heads/develop])
expect(remote_mirror).to receive(:update_repository).and_return(response)
execute!
expect(remote_mirror).to be_failed
expect(remote_mirror.last_error).to include("Some refs have diverged")
expect(remote_mirror.last_error).to include("refs/heads/master\n")
expect(remote_mirror.last_error).to include("refs/heads/develop")
end
end
context "sending lfs objects" do
let_it_be(:lfs_pointer) { create(:lfs_objects_project, project: project) }
before do
stub_lfs_setting(enabled: true)
end
it 'pushes LFS objects to a HTTP repository' do
expect_next_instance_of(Lfs::PushService) do |service|
expect(service).to receive(:execute)
end
expect(Gitlab::AppJsonLogger).not_to receive(:info)
execute!
expect(remote_mirror.update_status).to eq('finished')
expect(remote_mirror.last_error).to be_nil
end
context 'when LFS objects fail to push' do
before do
expect_next_instance_of(Lfs::PushService) do |service|
expect(service).to receive(:execute).and_return({ status: :error, message: 'unauthorized' })
end
end
context 'when remote_mirror_fail_on_lfs feature flag enabled' do
it 'fails update' do
expect(Gitlab::AppJsonLogger).to receive(:info).with(
hash_including(message: "Error synching remote mirror")).and_call_original
execute!
expect(remote_mirror.update_status).to eq('failed')
expect(remote_mirror.last_error).to eq("Error synchronizing LFS files:\n\nunauthorized\n\n")
end
end
context 'when remote_mirror_fail_on_lfs feature flag is disabled' do
before do
stub_feature_flags(remote_mirror_fail_on_lfs: false)
end
it 'does not fail update' do
expect(Gitlab::AppJsonLogger).to receive(:info).with(
hash_including(message: "Error synching remote mirror")).and_call_original
execute!
expect(remote_mirror.update_status).to eq('finished')
expect(remote_mirror.last_error).to be_nil
end
end
end
context 'with SSH repository' do
let(:ssh_mirror) { create(:remote_mirror, project: project, enabled: true) }
before do
allow(ssh_mirror)
.to receive(:update_repository)
.and_return(double(divergent_refs: []))
end
it 'does nothing to an SSH repository' do
ssh_mirror.update!(url: 'ssh://example.com')
expect_any_instance_of(Lfs::PushService).not_to receive(:execute)
service.execute(ssh_mirror, retries)
end
it 'does nothing if LFS is disabled' do
expect(project).to receive(:lfs_enabled?) { false }
expect_any_instance_of(Lfs::PushService).not_to receive(:execute)
service.execute(ssh_mirror, retries)
end
it 'does nothing if non-password auth is specified' do
ssh_mirror.update!(auth_method: 'ssh_public_key')
expect_any_instance_of(Lfs::PushService).not_to receive(:execute)
service.execute(ssh_mirror, retries)
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class MoveAccessService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
@project.with_transaction_returning_status do
if @project.namespace != source_project.namespace
@project.run_after_commit do
source_project.namespace.refresh_project_authorizations
self.namespace.refresh_project_authorizations
end
end
::Projects::MoveProjectMembersService.new(@project, @current_user)
.execute(source_project, remove_remaining_elements: remove_remaining_elements)
::Projects::MoveProjectGroupLinksService.new(@project, @current_user)
.execute(source_project, remove_remaining_elements: remove_remaining_elements)
::Projects::MoveProjectAuthorizationsService.new(@project, @current_user)
.execute(source_project, remove_remaining_elements: remove_remaining_elements)
@project.save(touch: false)
success
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveAccessService, feature_category: :groups_and_projects do
let(:user) { create(:user) }
let(:group) { create(:group) }
let(:project_with_access) { create(:project, namespace: user.namespace) }
let(:maintainer_user) { create(:user) }
let(:reporter_user) { create(:user) }
let(:developer_user) { create(:user) }
let(:maintainer_group) { create(:group) }
let(:reporter_group) { create(:group) }
let(:developer_group) { create(:group) }
before do
project_with_access.add_maintainer(maintainer_user)
project_with_access.add_developer(developer_user)
project_with_access.add_reporter(reporter_user)
project_with_access.project_group_links.create!(group: maintainer_group, group_access: Gitlab::Access::MAINTAINER)
project_with_access.project_group_links.create!(group: developer_group, group_access: Gitlab::Access::DEVELOPER)
project_with_access.project_group_links.create!(group: reporter_group, group_access: Gitlab::Access::REPORTER)
end
subject { described_class.new(target_project, user) }
describe '#execute' do
shared_examples 'move the accesses' do
it 'moves the accesses', :sidekiq_inline do
expect(project_with_access.project_members.count).to eq 4
expect(project_with_access.project_group_links.count).to eq 3
expect(project_with_access.authorized_users.count).to eq 4
subject.execute(project_with_access)
expect(project_with_access.project_members.count).to eq 0
expect(project_with_access.project_group_links.count).to eq 0
expect(project_with_access.authorized_users.count).to eq 1
expect(target_project.project_members.count).to eq 4
expect(target_project.project_group_links.count).to eq 3
expect(target_project.authorized_users.count).to eq 4
end
it 'rollbacks if an exception is raised' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_access) }.to raise_error(StandardError)
expect(project_with_access.project_members.count).to eq 4
expect(project_with_access.project_group_links.count).to eq 3
expect(project_with_access.authorized_users.count).to eq 4
end
end
context 'when both projects are in the same namespace' do
let(:target_project) { create(:project, namespace: user.namespace) }
it 'does not refresh project owner authorized projects' do
allow(project_with_access).to receive(:namespace).and_return(user.namespace)
expect(project_with_access.namespace).not_to receive(:refresh_project_authorizations)
expect(target_project.namespace).not_to receive(:refresh_project_authorizations)
subject.execute(project_with_access)
end
it_behaves_like 'move the accesses'
end
context 'when projects are in different namespaces' do
let(:target_project) { create(:project, namespace: group) }
before do
group.add_owner(user)
end
it 'refreshes both project owner authorized projects' do
allow(project_with_access).to receive(:namespace).and_return(user.namespace)
expect(user.namespace).to receive(:refresh_project_authorizations).once
expect(group).to receive(:refresh_project_authorizations).once
subject.execute(project_with_access)
end
it_behaves_like 'move the accesses'
end
context 'when remove_remaining_elements is false' do
let(:target_project) { create(:project, namespace: user.namespace) }
let(:options) { { remove_remaining_elements: false } }
it 'does not remove remaining memberships' do
target_project.add_maintainer(maintainer_user)
subject.execute(project_with_access, **options)
expect(project_with_access.project_members.count).not_to eq 0
end
it 'does not remove remaining group links' do
target_project.project_group_links.create!(group: maintainer_group, group_access: Gitlab::Access::MAINTAINER)
subject.execute(project_with_access, **options)
expect(project_with_access.project_group_links.count).not_to eq 0
end
it 'does not remove remaining authorizations' do
target_project.add_developer(developer_user)
subject.execute(project_with_access, **options)
expect(project_with_access.project_authorizations.count).not_to eq 0
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
# NOTE: This service cannot be used directly because it is part of a
# bigger process.
# project memberships, project group links, authorizations and refreshes
# the authorizations if necessary
module Projects
class MoveProjectAuthorizationsService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
Project.transaction do
move_project_authorizations
remove_remaining_authorizations if remove_remaining_elements
success
end
end
private
def move_project_authorizations
non_existent_authorization.update_all(project_id: @project.id)
end
def remove_remaining_authorizations
# I think because the Project Authorization table does not have a primary key
# it brings a lot of problems/bugs. First, Rails raises PG::SyntaxException if we use
# destroy_all instead of delete_all.
source_project.project_authorizations.delete_all(:delete_all)
end
# Look for authorizations in source_project that are not in the target project
# rubocop: disable CodeReuse/ActiveRecord
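# As a rough sketch (assumed schema), the relation below translates to:
#   SELECT user_id FROM project_authorizations
#   WHERE project_id = <source_project.id>
#     AND user_id NOT IN (<ids of users already authorized on @project>)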
def non_existent_authorization
source_project.project_authorizations
.select(:user_id)
.where.not(user: @project.authorized_users)
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveProjectAuthorizationsService, feature_category: :groups_and_projects do
let!(:user) { create(:user) }
let(:project_with_users) { create(:project, namespace: user.namespace) }
let(:target_project) { create(:project, namespace: user.namespace) }
let(:maintainer_user) { create(:user) }
let(:reporter_user) { create(:user) }
let(:developer_user) { create(:user) }
subject { described_class.new(target_project, user) }
describe '#execute' do
before do
project_with_users.add_maintainer(maintainer_user)
project_with_users.add_developer(developer_user)
project_with_users.add_reporter(reporter_user)
end
it 'moves the authorizations from one project to another' do
expect(project_with_users.authorized_users.count).to eq 4
expect(target_project.authorized_users.count).to eq 1
subject.execute(project_with_users)
expect(project_with_users.authorized_users.count).to eq 0
expect(target_project.authorized_users.count).to eq 4
end
it 'does not move existing authorizations to the current project' do
target_project.add_maintainer(developer_user)
target_project.add_developer(reporter_user)
expect(project_with_users.authorized_users.count).to eq 4
expect(target_project.authorized_users.count).to eq 3
subject.execute(project_with_users)
expect(project_with_users.authorized_users.count).to eq 0
expect(target_project.authorized_users.count).to eq 4
end
context 'when remove_remaining_elements is false' do
let(:options) { { remove_remaining_elements: false } }
it 'does not remove remaining project authorizations' do
target_project.add_maintainer(developer_user)
target_project.add_developer(reporter_user)
subject.execute(project_with_users, **options)
expect(project_with_users.project_authorizations.count).not_to eq 0
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Tries to schedule a move for every project with repositories on the source shard
class ScheduleBulkRepositoryShardMovesService
include ScheduleBulkRepositoryShardMovesMethods
extend ::Gitlab::Utils::Override
private
override :repository_klass
def repository_klass
ProjectRepository
end
override :container_klass
def container_klass
Project
end
override :container_column
def container_column
:project_id
end
override :schedule_bulk_worker_klass
def self.schedule_bulk_worker_klass
::Projects::ScheduleBulkRepositoryShardMovesWorker
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ScheduleBulkRepositoryShardMovesService, feature_category: :source_code_management do
it_behaves_like 'moves repository shard in bulk' do
let_it_be_with_reload(:container) { create(:project, :repository) }
let(:move_service_klass) { Projects::RepositoryStorageMove }
let(:bulk_worker_klass) { ::Projects::ScheduleBulkRepositoryShardMovesWorker }
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class ReadmeRendererService < BaseService
include Rails.application.routes.url_helpers
TEMPLATE_PATH = Rails.root.join('app', 'views', 'projects', 'readme_templates')
def execute
render(params[:template_name] || :default)
end
private
def render(template_name)
ERB.new(File.read(sanitized_filename(template_name)), trim_mode: '<>').result(binding)
end
def sanitized_filename(template_name)
path = Gitlab::PathTraversal.check_path_traversal!("#{template_name}.md.tt")
path = TEMPLATE_PATH.join(path).to_s
Gitlab::PathTraversal.check_allowed_absolute_path!(path, [TEMPLATE_PATH.to_s])
path
end
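# For illustration (hypothetical inputs, behaviour inferred from the checks above):
#   sanitized_filename(:default)           # => "#{TEMPLATE_PATH}/default.md.tt"
#   sanitized_filename('../../etc/passwd') # raises Gitlab::PathTraversal::PathTraversalAttackError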
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ReadmeRendererService, '#execute', feature_category: :groups_and_projects do
using RSpec::Parameterized::TableSyntax
subject(:service) { described_class.new(project, nil, opts) }
let_it_be(:project) { create(:project, name: 'My Project', description: '_custom_description_') }
let(:opts) { { default_branch: 'master' } }
it 'renders an ERB readme template' do
expect(service.execute).to start_with(<<~MARKDOWN)
# My Project
_custom_description_
## Getting started
To make it easy for you to get started with GitLab, here's a list of recommended next steps.
Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
## Add your files
- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
```
cd existing_repo
git remote add origin #{project.http_url_to_repo}
git branch -M master
git push -uf origin master
```
MARKDOWN
end
context 'with a custom template' do
before do
allow(File).to receive(:read).and_call_original
end
it 'renders that template file' do
opts[:template_name] = :custom_readme
expect(service).to receive(:sanitized_filename).with(:custom_readme).and_return('custom_readme.md.tt')
expect(File).to receive(:read).with('custom_readme.md.tt').and_return('_custom_readme_file_content_')
expect(service.execute).to eq('_custom_readme_file_content_')
end
context 'with path traversal in mind' do
where(:template_name, :exception, :expected_path) do
'../path/traversal/bad' | [Gitlab::PathTraversal::PathTraversalAttackError, 'Invalid path'] | nil
'/bad/template' | [StandardError, 'path /bad/template.md.tt is not allowed'] | nil
'good/template' | nil | 'good/template.md.tt'
end
with_them do
it 'raises the expected exception on bad paths' do
opts[:template_name] = template_name
if exception
expect { subject.execute }.to raise_error(*exception)
else
expect(File).to receive(:read).with(described_class::TEMPLATE_PATH.join(expected_path).to_s).and_return('')
expect { subject.execute }.not_to raise_error
end
end
end
end
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class DestroyService < BaseService
include Gitlab::ShellAdapter
DestroyError = Class.new(StandardError)
BATCH_SIZE = 100
def async_execute
project.update_attribute(:pending_delete, true)
job_id = ProjectDestroyWorker.perform_async(project.id, current_user.id, params)
log_info("User #{current_user.id} scheduled destruction of project #{project.full_path} with job ID #{job_id}")
end
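# Typical invocation (mirroring how the specs exercise this service):
#   Projects::DestroyService.new(project, current_user, {}).execute        # destroy inline
#   Projects::DestroyService.new(project, current_user, {}).async_execute  # destroy via ProjectDestroyWorker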
def execute
return false unless can?(current_user, :remove_project, project)
project.update_attribute(:pending_delete, true)
# There is a possibility of active repository move processes for
# project and snippets. An attempt to delete the project at the same time
# can lead to a race condition and an inconsistent state.
#
# This validation stops the project delete process if it detects active
# repository move schedules for it.
validate_active_repositories_move!
# Flush the cache for both repositories. This has to be done _before_
# removing the physical repositories as some expiration code depends on
# Git data (e.g. a list of branch names).
flush_caches(project)
::Ci::AbortPipelinesService.new.execute(project.all_pipelines, :project_deleted)
Projects::UnlinkForkService.new(project, current_user).execute(refresh_statistics: false)
attempt_destroy(project)
system_hook_service.execute_hooks_for(project, :destroy)
log_info("Project \"#{project.full_path}\" was deleted")
publish_project_deleted_event_for(project)
project.invalidate_personal_projects_count_of_owner
true
rescue StandardError => error
context = Gitlab::ApplicationContext.current.merge(project_id: project.id)
Gitlab::ErrorTracking.track_exception(error, **context)
attempt_rollback(project, error.message)
false
rescue Exception => error # rubocop:disable Lint/RescueException
# Project.transaction can raise Exception
attempt_rollback(project, error.message)
raise
end
private
def validate_active_repositories_move!
if project.repository_storage_moves.scheduled_or_started.exists?
raise_error(s_("DeleteProject|Couldn't remove the project. A project repository storage move is in progress. Try again when it's complete."))
end
if ::ProjectSnippet.by_project(project).with_repository_storage_moves.merge(::Snippets::RepositoryStorageMove.scheduled_or_started).exists?
raise_error(s_("DeleteProject|Couldn't remove the project. A related snippet repository storage move is in progress. Try again when it's complete."))
end
end
def trash_project_repositories!
unless remove_repository(project.repository)
raise_error(s_('DeleteProject|Failed to remove project repository. Please try again or contact administrator.'))
end
unless remove_repository(project.wiki.repository)
raise_error(s_('DeleteProject|Failed to remove wiki repository. Please try again or contact administrator.'))
end
unless remove_repository(project.design_repository)
raise_error(s_('DeleteProject|Failed to remove design repository. Please try again or contact administrator.'))
end
end
def trash_relation_repositories!
unless remove_snippets
raise_error(s_('DeleteProject|Failed to remove project snippets. Please try again or contact administrator.'))
end
end
def remove_snippets
# We're setting the skip_authorization param because we don't need to perform the access checks within the service since
# the user has enough access rights to remove the project and its resources.
response = ::Snippets::BulkDestroyService.new(current_user, project.snippets).execute(skip_authorization: true)
if response.error?
log_error("Snippet deletion failed on #{project.full_path} with the following message: #{response.message}")
end
response.success?
end
def destroy_events!
unless remove_events
raise_error(s_('DeleteProject|Failed to remove events. Please try again or contact administrator.'))
end
end
def remove_events
log_info("Attempting to destroy events from #{project.full_path} (#{project.id})")
response = ::Events::DestroyService.new(project).execute
if response.error?
log_error("Event deletion failed on #{project.full_path} with the following message: #{response.message}")
end
response.success?
end
def remove_repository(repository)
return true unless repository
result = Repositories::DestroyService.new(repository).execute
result[:status] == :success
end
def attempt_rollback(project, message)
return unless project
# It's possible that the project was destroyed, but some after_commit
# hook failed and caused us to end up here. A destroyed model will be a frozen hash,
# which cannot be altered.
unless project.destroyed?
# Restrict project visibility if the parent group visibility was made more restrictive while the project was scheduled for deletion.
visibility_level = project.visibility_level_allowed_by_group? ? project.visibility_level : project.group.visibility_level
project.update(delete_error: message, pending_delete: false, visibility_level: visibility_level)
end
log_error("Deletion failed on #{project.full_path} with the following message: #{message}")
end
def attempt_destroy(project)
unless remove_registry_tags
raise_error(s_('DeleteProject|Failed to remove some tags in project container registry. Please try again or contact administrator.'))
end
project.leave_pool_repository
destroy_project_related_records(project)
end
def destroy_project_related_records(project)
log_destroy_event
trash_relation_repositories!
trash_project_repositories!
destroy_events!
destroy_web_hooks!
destroy_project_bots!
destroy_ci_records!
destroy_mr_diff_relations!
destroy_merge_request_diffs!
# Rails attempts to load all related records into memory before
# destroying: https://github.com/rails/rails/issues/22510
# This ensures we delete records in batches.
#
# Exclude container repositories because its before_destroy would be
# called multiple times, and it doesn't destroy any database records.
project.destroy_dependent_associations_in_batches(exclude: [:container_repositories, :snippets])
project.destroy!
end
def log_destroy_event
log_info("Attempting to destroy #{project.full_path} (#{project.id})")
end
# Projects will have at least one merge_request_diff_commit for every commit
# contained in every MR, so deleting them via `project.destroy!` and
# cascading deletes may exceed statement timeouts, causing failures.
# (see https://gitlab.com/gitlab-org/gitlab/-/issues/346166)
#
# Removing merge_request_diff_files records may also cause timeouts, so they
# can be deleted in batches as well.
#
# rubocop: disable CodeReuse/ActiveRecord
def destroy_mr_diff_relations!
delete_batch_size = 1000
project.merge_requests.each_batch(column: :iid, of: BATCH_SIZE) do |relation_ids|
[MergeRequestDiffCommit, MergeRequestDiffFile].each do |model|
loop do
inner_query = model
.select(:merge_request_diff_id, :relative_order)
.where(merge_request_diff_id: MergeRequestDiff.where(merge_request_id: relation_ids).select(:id))
.limit(delete_batch_size)
deleted_rows = model
.where("(#{model.table_name}.merge_request_diff_id, #{model.table_name}.relative_order) IN (?)", inner_query) # rubocop:disable GitlabSecurity/SqlInjection
.delete_all
break if deleted_rows == 0
end
end
end
end
# rubocop: enable CodeReuse/ActiveRecord
# rubocop: disable CodeReuse/ActiveRecord
def destroy_merge_request_diffs!
delete_batch_size = 1000
project.merge_requests.each_batch(column: :iid, of: BATCH_SIZE) do |relation|
loop do
deleted_rows = MergeRequestDiff
.where(merge_request: relation)
.limit(delete_batch_size)
.delete_all
break if deleted_rows == 0
end
end
end
# rubocop: enable CodeReuse/ActiveRecord
def destroy_ci_records!
# Make sure to destroy this first just in case the project is undergoing stats refresh.
# This is to avoid logging the artifact deletion in Ci::JobArtifacts::DestroyBatchService.
project.build_artifacts_size_refresh&.destroy
project.all_pipelines.find_each(batch_size: BATCH_SIZE) do |pipeline| # rubocop: disable CodeReuse/ActiveRecord
# Destroy artifacts, then builds, then pipelines
# All builds have already been dropped by Ci::AbortPipelinesService,
# so no Ci::Build-instantiating cancellations happen here.
# https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71342#note_691523196
::Ci::DestroyPipelineService.new(project, current_user).execute(pipeline)
end
project.secure_files.find_each(batch_size: BATCH_SIZE) do |secure_file| # rubocop: disable CodeReuse/ActiveRecord
::Ci::DestroySecureFileService.new(project, current_user).execute(secure_file)
end
deleted_count = ::CommitStatus.for_project(project).delete_all
Gitlab::AppLogger.info(
class: 'Projects::DestroyService',
project_id: project.id,
message: 'leftover commit statuses',
orphaned_commit_status_count: deleted_count
)
end
# The project can have multiple webhooks with hundreds of thousands of web_hook_logs.
# By default, they are removed with "DELETE CASCADE" option defined via foreign_key.
# But such queries can exceed the statement_timeout limit and fail to delete the project.
# (see https://gitlab.com/gitlab-org/gitlab/-/issues/26259)
#
# To prevent that we use WebHooks::DestroyService. It deletes logs in batches and
# produces smaller and faster queries to the database.
def destroy_web_hooks!
project.hooks.find_each do |web_hook|
result = ::WebHooks::DestroyService.new(current_user).execute(web_hook)
unless result[:status] == :success
raise_error(s_('DeleteProject|Failed to remove webhooks. Please try again or contact administrator.'))
end
end
end
# The project can have multiple project bots with personal access tokens generated.
# We need to remove them when a project is deleted
# rubocop: disable CodeReuse/ActiveRecord
def destroy_project_bots!
members = project.members
.allow_cross_joins_across_databases(url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/422405')
.includes(:user).references(:user).merge(User.project_bot)
members.each do |member|
Users::DestroyService.new(current_user).execute(member.user, skip_authorization: true)
end
end
# rubocop: enable CodeReuse/ActiveRecord
def remove_registry_tags
return true unless Gitlab.config.registry.enabled
return false unless remove_legacy_registry_tags
results = []
project.container_repositories.find_each do |container_repository|
results << destroy_repository(project, container_repository)
end
results.all?
end
##
# This method makes sure that we correctly remove registry tags
# for legacy image repository (when repository path equals project path).
#
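# For example (hypothetical paths): a project at 'group/app' may still hold
# tags on the legacy repository path 'group/app' itself, alongside its
# per-image repositories such as 'group/app/backend'.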
def remove_legacy_registry_tags
return true unless Gitlab.config.registry.enabled
root_repository = ::ContainerRepository.build_root_repository(project)
root_repository.has_tags? ? destroy_repository(project, root_repository) : true
end
def destroy_repository(project, repository)
service = ContainerRepository::DestroyService.new(project, current_user, { skip_permission_check: true })
response = service.execute(repository)
response[:status] == :success
end
def raise_error(message)
raise DestroyError, message
end
def flush_caches(project)
Projects::ForksCountService.new(project).delete_cache
end
def publish_project_deleted_event_for(project)
event = Projects::ProjectDeletedEvent.new(data: {
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id
})
Gitlab::EventStore.publish(event)
end
end
end
Projects::DestroyService.prepend_mod_with('Projects::DestroyService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::DestroyService, :aggregate_failures, :event_store_publisher, feature_category: :groups_and_projects do
include ProjectForksHelper
include BatchDestroyDependentAssociationsHelper
let_it_be(:user) { create(:user) }
let!(:project) { create(:project, :repository, namespace: user.namespace) }
let(:path) { project.repository.disk_path }
let(:async) { false } # execute or async_execute
before do
stub_container_registry_config(enabled: true)
stub_container_registry_tags(repository: :any, tags: [])
end
shared_examples 'deleting the project' do
it 'deletes the project', :sidekiq_inline do
destroy_project(project, user, {})
expect(Project.unscoped.all).not_to include(project)
expect(project.gitlab_shell.repository_exists?(project.repository_storage, path + '.git')).to be_falsey
end
it 'publishes a ProjectDeletedEvent' do
expected_data = {
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id
}
expect { destroy_project(project, user, {}) }.to publish_event(Projects::ProjectDeletedEvent).with(expected_data)
end
end
shared_examples 'deleting the project with pipeline and build' do
context 'with pipeline and build related records', :sidekiq_inline do # which has optimistic locking
let!(:pipeline) { create(:ci_pipeline, project: project) }
let!(:build) { create(:ci_build, :artifacts, :with_runner_session, pipeline: pipeline) }
let!(:trace_chunks) { create(:ci_build_trace_chunk, build: build) }
let!(:job_variables) { create(:ci_job_variable, job: build) }
let!(:report_result) { create(:ci_build_report_result, build: build) }
let!(:pending_state) { create(:ci_build_pending_state, build: build) }
let!(:pipeline_artifact) { create(:ci_pipeline_artifact, pipeline: pipeline) }
let!(:secure_file) { create(:ci_secure_file, project: project) }
it 'deletes build and pipeline related records' do
expect { destroy_project(project, user, {}) }
.to change { Ci::Build.count }.by(-1)
.and change { Ci::BuildTraceChunk.count }.by(-1)
.and change { Ci::JobArtifact.count }.by(-2)
.and change { Ci::DeletedObject.count }.by(2)
.and change { Ci::PipelineArtifact.count }.by(-1)
.and change { Ci::JobVariable.count }.by(-1)
.and change { Ci::BuildPendingState.count }.by(-1)
.and change { Ci::BuildReportResult.count }.by(-1)
.and change { Ci::BuildRunnerSession.count }.by(-1)
.and change { Ci::Pipeline.count }.by(-1)
.and change { Ci::SecureFile.count }.by(-1)
end
it 'avoids N+1 queries' do
recorder = ActiveRecord::QueryRecorder.new { destroy_project(project, user, {}) }
project = create(:project, :repository, namespace: user.namespace)
pipeline = create(:ci_pipeline, project: project)
builds = create_list(:ci_build, 3, :artifacts, pipeline: pipeline)
create(:ci_pipeline_artifact, pipeline: pipeline)
create_list(:ci_build_trace_chunk, 3, build: builds[0])
expect { destroy_project(project, project.first_owner, {}) }.not_to exceed_query_limit(recorder)
end
it_behaves_like 'deleting the project'
context 'when project is undergoing refresh' do
let!(:build_artifacts_size_refresh) { create(:project_build_artifacts_size_refresh, :pending, project: project) }
it 'does not log about artifact deletion but continues to delete artifacts' do
expect(Gitlab::ProjectStatsRefreshConflictsLogger).not_to receive(:warn_artifact_deletion_during_stats_refresh)
expect { destroy_project(project, user, {}) }
.to change { Ci::JobArtifact.count }.by(-2)
.and change { Projects::BuildArtifactsSizeRefresh.count }.by(-1)
end
end
end
end
shared_examples 'handles errors thrown during async destroy' do |error_message|
it 'does not allow the error to bubble up' do
expect do
destroy_project(project, user, {})
end.not_to raise_error
end
it 'reports the error' do
expect(Gitlab::ErrorTracking).to receive(:track_exception).and_call_original
destroy_project(project, user, {})
end
it 'unmarks the project as "pending deletion"' do
destroy_project(project, user, {})
expect(project.reload.pending_delete).to be(false)
end
it 'stores an error message in `projects.delete_error`' do
destroy_project(project, user, {})
expect(project.reload.delete_error).to be_present
expect(project.delete_error).to match(error_message)
end
context 'when parent group visibility was made more restrictive while project was marked "pending deletion"' do
let!(:group) { create(:group, :public) }
let!(:project) { create(:project, :repository, :public, namespace: group) }
it 'sets the project visibility level to that of the parent group' do
group.add_owner(user)
project.group.update_attribute(:visibility_level, Gitlab::VisibilityLevel::INTERNAL)
expect(project.reload.visibility_level).to be(Gitlab::VisibilityLevel::PUBLIC)
expect(project.group.visibility_level).to be(Gitlab::VisibilityLevel::INTERNAL)
destroy_project(project, user, {})
expect(project.reload.visibility_level).to be(Gitlab::VisibilityLevel::INTERNAL)
end
end
end
context "deleting a project with merge requests" do
let!(:merge_request) { create(:merge_request, source_project: project) }
before do
allow(project).to receive(:destroy!).and_return(true)
end
[MergeRequestDiffCommit, MergeRequestDiffFile].each do |model|
it "deletes #{model} records of the merge request" do
merge_request_diffs = merge_request.merge_request_diffs
expect(merge_request_diffs.size).to eq(1)
records_count = model.where(merge_request_diff_id: merge_request_diffs.first.id).count
expect { destroy_project(project, user, {}) }.to change { model.count }.by(-records_count)
end
end
end
context 'deleting a project with merge request diffs' do
let!(:merge_request) { create(:merge_request, source_project: project) }
let!(:another_project_mr) { create(:merge_request, source_project: create(:project)) }
it 'deletes merge request diffs' do
merge_request_diffs = merge_request.merge_request_diffs
expect(merge_request_diffs.size).to eq(1)
expect { destroy_project(project, user, {}) }.to change(MergeRequestDiff, :count).by(-1)
expect { another_project_mr.reload }.not_to raise_error
end
end
it_behaves_like 'deleting the project'
context 'personal projects count cache' do
context 'when the executor is the creator of the project itself' do
it 'invalidates personal_project_count cache of the owner of the personal namespace' do
expect(user).to receive(:invalidate_personal_projects_count)
destroy_project(project, user, {})
end
end
context 'when the executor is the instance administrator', :enable_admin_mode do
it 'invalidates personal_project_count cache of the owner of the personal namespace' do
expect(user).to receive(:invalidate_personal_projects_count)
destroy_project(project, create(:admin), {})
end
end
end
context 'with running pipelines' do
let!(:pipelines) { create_list(:ci_pipeline, 3, :running, project: project) }
let(:destroy_pipeline_service) { double('DestroyPipelineService', execute: nil) }
it 'bulk-fails pipelines with AbortPipelinesService and then executes DestroyPipelineService for each pipeline' do
allow(::Ci::DestroyPipelineService).to receive(:new).and_return(destroy_pipeline_service)
expect(::Ci::AbortPipelinesService)
.to receive_message_chain(:new, :execute)
.with(project.all_pipelines, :project_deleted)
pipelines.each do |pipeline|
expect(destroy_pipeline_service).to receive(:execute).with(pipeline)
end
destroy_project(project, user, {})
end
end
context 'when project has remote mirrors' do
let!(:project) do
create(:project, :repository, namespace: user.namespace).tap do |project|
project.remote_mirrors.create!(url: 'http://test.com')
end
end
it 'destroys them' do
expect(RemoteMirror.count).to eq(1)
destroy_project(project, user, {})
expect(RemoteMirror.count).to eq(0)
end
end
context 'when project has exports' do
let!(:project_with_export) do
create(:project, :repository, namespace: user.namespace).tap do |project|
create(
:import_export_upload,
project: project,
export_file: fixture_file_upload('spec/fixtures/project_export.tar.gz')
)
end
end
it 'destroys project and export' do
expect do
destroy_project(project_with_export, user, {})
end.to change(ImportExportUpload, :count).by(-1)
expect(Project.all).not_to include(project_with_export)
end
end
context 'Sidekiq fake' do
before do
# Don't run Sidekiq to check if the renamed repository exists
Sidekiq::Testing.fake! { destroy_project(project, user, {}) }
end
it { expect(Project.all).not_to include(project) }
it do
expect(project.gitlab_shell.repository_exists?(project.repository_storage, path + '.git')).to be_falsey
end
end
context 'when flushing caches fail due to Git errors' do
before do
allow(project.repository).to receive(:before_delete).and_raise(::Gitlab::Git::CommandError)
allow(Gitlab::GitLogger).to receive(:warn).with(
class: Repositories::DestroyService.name,
container_id: project.id,
disk_path: project.disk_path,
message: 'Gitlab::Git::CommandError').and_call_original
end
it_behaves_like 'deleting the project'
end
context 'when flushing caches fail due to Redis' do
before do
new_user = create(:user)
project.team.add_member(new_user, Gitlab::Access::DEVELOPER)
allow_any_instance_of(described_class).to receive(:flush_caches).and_raise(::Redis::CannotConnectError)
end
it 'keeps project team intact upon an error' do
perform_enqueued_jobs do
destroy_project(project, user, {})
rescue ::Redis::CannotConnectError
end
expect(project.team.members.count).to eq 2
end
end
context 'with async_execute', :sidekiq_inline do
let(:async) { true }
context 'async delete of project with private issue visibility' do
before do
project.project_feature.update_attribute("issues_access_level", ProjectFeature::PRIVATE)
end
it_behaves_like 'deleting the project'
end
it_behaves_like 'deleting the project with pipeline and build'
context 'errors' do
context 'when `remove_legacy_registry_tags` fails' do
before do
expect_any_instance_of(described_class)
.to receive(:remove_legacy_registry_tags).and_return(false)
end
it_behaves_like 'handles errors thrown during async destroy', /Failed to remove some tags/
end
context 'when `remove_repository` fails' do
before do
expect_any_instance_of(described_class)
.to receive(:remove_repository).and_return(false)
end
it_behaves_like 'handles errors thrown during async destroy', /Failed to remove/
end
context 'when `execute` raises expected error' do
before do
expect_any_instance_of(Project)
.to receive(:destroy!).and_raise(StandardError.new("Other error message"))
end
it_behaves_like 'handles errors thrown during async destroy', /Other error message/
end
context 'when `execute` raises unexpected error' do
before do
expect_any_instance_of(Project)
.to receive(:destroy!).and_raise(Exception.new('Other error message'))
end
it 'allows error to bubble up and rolls back project deletion' do
expect do
destroy_project(project, user, {})
end.to raise_error(Exception, 'Other error message')
expect(project.reload.pending_delete).to be(false)
expect(project.delete_error).to include("Other error message")
end
end
end
context 'for an archived project' do
before do
project.update!(archived: true)
end
it_behaves_like 'deleting the project with pipeline and build'
end
end
describe 'container registry' do
context 'when there are regular container repositories' do
let(:container_repository) { create(:container_repository) }
before do
stub_container_registry_tags(repository: project.full_path + '/image', tags: ['tag'])
project.container_repositories << container_repository
end
context 'when image repository deletion succeeds' do
it 'returns true' do
expect_next_instance_of(Projects::ContainerRepository::CleanupTagsService) do |instance|
expect(instance).to receive(:execute).and_return(status: :success)
end
expect(destroy_project(project, user)).to be true
end
end
context 'when image repository deletion raises an error' do
it 'returns false' do
expect_next_instance_of(Projects::ContainerRepository::CleanupTagsService) do |service|
expect(service).to receive(:execute).and_raise(RuntimeError)
end
expect(destroy_project(project, user)).to be false
end
end
context 'when image repository deletion fails' do
it 'returns false' do
expect_next_instance_of(Projects::ContainerRepository::DestroyService) do |service|
expect(service).to receive(:execute).and_return({ status: :error })
end
expect(destroy_project(project, user)).to be false
end
end
context 'when registry is disabled' do
before do
stub_container_registry_config(enabled: false)
end
it 'does not attempt to remove any tags' do
expect(Projects::ContainerRepository::DestroyService).not_to receive(:new)
destroy_project(project, user)
end
end
end
context 'when there are tags for legacy root repository' do
before do
stub_container_registry_tags(repository: project.full_path, tags: ['tag'])
end
context 'when image repository tags deletion succeeds' do
it 'removes tags' do
expect_next_instance_of(Projects::ContainerRepository::DestroyService) do |service|
expect(service).to receive(:execute).and_return({ status: :success })
end
destroy_project(project, user)
end
end
context 'when image repository tags deletion fails' do
it 'raises an exception' do
expect_next_instance_of(Projects::ContainerRepository::DestroyService) do |service|
expect(service).to receive(:execute).and_return({ status: :error })
end
expect(destroy_project(project, user)).to be false
end
end
end
context 'when there are no tags for legacy root repository' do
before do
stub_container_registry_tags(repository: project.full_path, tags: [])
end
it 'does not try to destroy the repository' do
expect(Projects::ContainerRepository::DestroyService).not_to receive(:new)
destroy_project(project, user)
end
end
end
context 'for a forked project with LFS objects' do
let(:forked_project) { fork_project(project, user) }
before do
project.lfs_objects << create(:lfs_object)
forked_project.reload
end
it 'destroys the fork' do
expect { destroy_project(forked_project, user) }
.not_to raise_error
end
it 'does not update project statistics for the deleted project' do
expect(ProjectCacheWorker).not_to receive(:perform_async)
destroy_project(forked_project, user)
end
end
context 'as the root of a fork network' do
let!(:fork_1) { fork_project(project, user) }
let!(:fork_2) { fork_project(project, user) }
it 'updates the fork network with the project name' do
fork_network = project.fork_network
destroy_project(project, user)
fork_network.reload
expect(fork_network.deleted_root_project_name).to eq(project.full_name)
expect(fork_network.root_project).to be_nil
end
end
context 'with related storage move records' do
context 'when project has active repository storage move records' do
let!(:project_repository_storage_move) { create(:project_repository_storage_move, :scheduled, container: project) }
it 'does not delete the project' do
expect(destroy_project(project, user)).to be_falsey
expect(project.delete_error).to eq "Couldn't remove the project. A project repository storage move is in progress. Try again when it's complete."
expect(project.pending_delete).to be_falsey
end
end
context 'when project has active snippet storage move records' do
let(:project_snippet) { create(:project_snippet, project: project) }
let!(:snippet_repository_storage_move) { create(:snippet_repository_storage_move, :started, container: project_snippet) }
it 'does not delete the project' do
expect(destroy_project(project, user)).to be_falsey
expect(project.delete_error).to eq "Couldn't remove the project. A related snippet repository storage move is in progress. Try again when it's complete."
expect(project.pending_delete).to be_falsey
end
end
end
context 'repository removal' do
describe '.trash_project_repositories!' do
let(:trash_project_repositories!) { described_class.new(project, user, {}).send(:trash_project_repositories!) }
# Destroys 3 repositories:
# 1. Project repository
# 2. Wiki repository
# 3. Design repository
it 'calls Repositories::DestroyService for existing repos' do
expect_next_instances_of(Repositories::DestroyService, 3) do |instance|
expect(instance).to receive(:execute).and_return(status: :success)
end
trash_project_repositories!
end
context 'when the removal has errors' do
using RSpec::Parameterized::TableSyntax
let(:mock_error) { instance_double(Repositories::DestroyService, execute: { message: 'foo', status: :error }) }
let(:project_repository) { project.repository }
let(:wiki_repository) { project.wiki.repository }
let(:design_repository) { project.design_repository }
where(:repo, :message) do
ref(:project_repository) | 'Failed to remove project repository. Please try again or contact administrator.'
ref(:wiki_repository) | 'Failed to remove wiki repository. Please try again or contact administrator.'
ref(:design_repository) | 'Failed to remove design repository. Please try again or contact administrator.'
end
with_them do
before do
allow(Repositories::DestroyService).to receive(:new).with(anything).and_call_original
allow(Repositories::DestroyService).to receive(:new).with(repo).and_return(mock_error)
end
it 'raises correct error' do
expect { trash_project_repositories! }.to raise_error(Projects::DestroyService::DestroyError, message)
end
end
end
end
it 'removes project repository' do
expect { destroy_project(project, user, {}) }.to change { project.repository.exists? }.from(true).to(false)
end
it 'removes wiki repository' do
project.create_wiki unless project.wiki.repository.exists?
expect { destroy_project(project, user, {}) }.to change { project.wiki.repository.exists? }.from(true).to(false)
end
it 'removes design repository' do
project.design_repository.create_if_not_exists
expect { destroy_project(project, user, {}) }.to change { project.design_repository.exists? }.from(true).to(false)
end
end
context 'snippets' do
let!(:snippet1) { create(:project_snippet, project: project, author: user) }
let!(:snippet2) { create(:project_snippet, project: project, author: user) }
it 'does not include snippets when deleting in batches' do
expect(project).to receive(:destroy_dependent_associations_in_batches).with({ exclude: [:container_repositories, :snippets] })
destroy_project(project, user)
end
it 'calls the bulk snippet destroy service with the skip_authorization param set to true' do
expect(project.snippets.count).to eq 2
expect_next_instance_of(Snippets::BulkDestroyService, user, project.snippets) do |instance|
expect(instance).to receive(:execute).with(skip_authorization: true).and_call_original
end
expect do
destroy_project(project, user)
end.to change(Snippet, :count).by(-2)
end
context 'when an error is raised deleting snippets' do
let(:error_message) { 'foo' }
it 'does not delete project' do
allow_next_instance_of(Snippets::BulkDestroyService) do |instance|
allow(instance).to receive(:execute).and_return(ServiceResponse.error(message: error_message))
end
expect(Gitlab::AppLogger).to receive(:error).with("Snippet deletion failed on #{project.full_path} with the following message: #{error_message}")
expect(Gitlab::AppLogger).to receive(:error).with(/Failed to remove project snippets/)
expect(destroy_project(project, user)).to be_falsey
expect(project.gitlab_shell.repository_exists?(project.repository_storage, path + '.git')).to be_truthy
end
end
end
context 'when project has webhooks' do
let!(:web_hook1) { create(:project_hook, project: project) }
let!(:web_hook2) { create(:project_hook, project: project) }
let!(:another_project_web_hook) { create(:project_hook) }
let!(:web_hook_log) { create(:web_hook_log, web_hook: web_hook1) }
it 'deletes webhooks and logs related to project' do
expect_next_instance_of(WebHooks::DestroyService, user) do |instance|
expect(instance).to receive(:execute).with(web_hook1).and_call_original
end
expect_next_instance_of(WebHooks::DestroyService, user) do |instance|
expect(instance).to receive(:execute).with(web_hook2).and_call_original
end
expect do
destroy_project(project, user)
end.to change(WebHook, :count).by(-2)
end
context 'when an error is raised deleting webhooks' do
before do
allow_next_instance_of(WebHooks::DestroyService) do |instance|
allow(instance).to receive(:execute).and_return(message: 'foo', status: :error)
end
end
it_behaves_like 'handles errors thrown during async destroy', /Failed to remove webhooks/
end
end
context 'when project has project bots' do
let!(:project_bot) { create(:user, :project_bot).tap { |user| project.add_maintainer(user) } }
it 'deletes bot user as well' do
expect_next_instance_of(Users::DestroyService, user) do |instance|
expect(instance).to receive(:execute).with(project_bot, skip_authorization: true).and_call_original
end
destroy_project(project, user)
end
end
context 'when project has events' do
let!(:event) { create(:event, :created, project: project, target: project, author: user) }
it 'deletes events from the project' do
expect do
destroy_project(project, user)
end.to change(Event, :count).by(-1)
end
context 'when an error is returned while deleting events' do
it 'does not delete project' do
allow_next_instance_of(Events::DestroyService) do |instance|
allow(instance).to receive(:execute).and_return(ServiceResponse.error(message: 'foo'))
end
expect(destroy_project(project, user)).to be_falsey
expect(project.delete_error).to include('Failed to remove events')
end
end
end
context 'error while destroying', :sidekiq_inline do
let!(:pipeline) { create(:ci_pipeline, project: project) }
let!(:builds) { create_list(:ci_build, 2, :artifacts, pipeline: pipeline) }
let!(:build_trace) { create(:ci_build_trace_chunk, build: builds[0]) }
it 'deletes on retry' do
# We can expect this to timeout for very large projects
# TODO: remove allow_next_instance_of: https://gitlab.com/gitlab-org/gitlab/-/issues/220440
allow_any_instance_of(Ci::Build).to receive(:destroy).and_raise('boom')
destroy_project(project, user, {})
allow_any_instance_of(Ci::Build).to receive(:destroy).and_call_original
destroy_project(project, user, {})
expect(Project.unscoped.all).not_to include(project)
expect(project.gitlab_shell.repository_exists?(project.repository_storage, path + '.git')).to be_falsey
expect(project.all_pipelines).to be_empty
expect(project.builds).to be_empty
end
end
context 'associations destroyed in batches' do
let!(:merge_request) { create(:merge_request, source_project: project) }
let!(:issue) { create(:issue, project: project) }
let!(:label) { create(:label, project: project) }
it 'destroys the associations marked as `dependent: :destroy`, in batches' do
query_recorder = ActiveRecord::QueryRecorder.new do
destroy_project(project, user, {})
end
expect(project.merge_requests).to be_empty
expect(project.issues).to be_empty
expect(project.labels).to be_empty
expected_queries = [
delete_in_batches_regexps(:merge_requests, :target_project_id, project, [merge_request]),
delete_in_batches_regexps(:issues, :project_id, project, [issue]),
delete_in_batches_regexps(:labels, :project_id, project, [label])
].flatten
expect(query_recorder.log).to include(*expected_queries)
end
end
def destroy_project(project, user, params = {})
described_class.new(project, user, params).public_send(async ? :async_execute : :execute)
end
end
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
# NOTE: This service cannot be used directly because it is part of a
# bigger process.
# project memberships, project group links, authorizations and refreshes
# the authorizations if necessary
module Projects
class MoveProjectGroupLinksService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
Project.transaction do
move_group_links
remove_remaining_project_group_links if remove_remaining_elements
success
end
end
private
def move_group_links
non_existent_group_links.update_all(project_id: @project.id)
end
# Remove remaining project group links from source_project
def remove_remaining_project_group_links
source_project.reset.project_group_links.destroy_all # rubocop: disable Cop/DestroyAll
end
def group_links_in_target_project
@project.project_group_links.select(:group_id)
end
# Look for groups in source_project that are not in the target project
# rubocop: disable CodeReuse/ActiveRecord
def non_existent_group_links
source_project.project_group_links
.where.not(group_id: group_links_in_target_project)
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveProjectGroupLinksService, feature_category: :groups_and_projects do
let!(:user) { create(:user) }
let(:project_with_groups) { create(:project, namespace: user.namespace) }
let(:target_project) { create(:project, namespace: user.namespace) }
let(:maintainer_group) { create(:group) }
let(:reporter_group) { create(:group) }
let(:developer_group) { create(:group) }
subject { described_class.new(target_project, user) }
describe '#execute' do
before do
project_with_groups.project_group_links.create!(group: maintainer_group, group_access: Gitlab::Access::MAINTAINER)
project_with_groups.project_group_links.create!(group: developer_group, group_access: Gitlab::Access::DEVELOPER)
project_with_groups.project_group_links.create!(group: reporter_group, group_access: Gitlab::Access::REPORTER)
end
it 'moves the group links from one project to another' do
expect(project_with_groups.project_group_links.count).to eq 3
expect(target_project.project_group_links.count).to eq 0
subject.execute(project_with_groups)
expect(project_with_groups.project_group_links.count).to eq 0
expect(target_project.project_group_links.count).to eq 3
end
it 'does not move existing group links to the current project' do
target_project.project_group_links.create!(group: maintainer_group, group_access: Gitlab::Access::MAINTAINER)
target_project.project_group_links.create!(group: developer_group, group_access: Gitlab::Access::DEVELOPER)
expect(project_with_groups.project_group_links.count).to eq 3
expect(target_project.project_group_links.count).to eq 2
subject.execute(project_with_groups)
expect(project_with_groups.project_group_links.count).to eq 0
expect(target_project.project_group_links.count).to eq 3
end
it 'rolls back changes if the transaction fails' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_groups) }.to raise_error(StandardError)
expect(project_with_groups.project_group_links.count).to eq 3
expect(target_project.project_group_links.count).to eq 0
end
context 'when remove_remaining_elements is false' do
let(:options) { { remove_remaining_elements: false } }
it 'does not remove remaining project group links' do
target_project.project_group_links.create!(group: maintainer_group, group_access: Gitlab::Access::MAINTAINER)
target_project.project_group_links.create!(group: developer_group, group_access: Gitlab::Access::DEVELOPER)
subject.execute(project_with_groups, **options)
expect(project_with_groups.project_group_links.count).not_to eq 0
end
end
end
end
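For context, a minimal console sketch of how this service is invoked. The ids and user are placeholders, and in production the call happens inside `Projects::MoveAccessService`, which also refreshes authorizations afterwards:

```ruby
# All three objects are assumed to exist already (ids are placeholders).
source = Project.find(42)
target = Project.find(43)
user   = User.find_by!(username: 'some-maintainer')

Projects::MoveProjectGroupLinksService
  .new(target, user)
  .execute(source, remove_remaining_elements: true)
```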
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class UpdateService < BaseService
include UpdateVisibilityLevel
include ValidatesClassificationLabel
ValidationError = Class.new(StandardError)
def execute
build_topics
remove_unallowed_params
add_pages_unique_domain
validate!
ensure_wiki_exists if enabling_wiki?
if changing_repository_storage?
storage_move = project.repository_storage_moves.build(
source_storage_name: project.repository_storage,
destination_storage_name: params.delete(:repository_storage)
)
storage_move.schedule
end
yield if block_given?
validate_classification_label(project, :external_authorization_classification_label)
# If the block added errors, don't try to save the project
return update_failed! if project.errors.any?
if project.update(params.except(:default_branch))
after_update
success
else
update_failed!
end
rescue ValidationError => e
error(e.message)
end
def run_auto_devops_pipeline?
return false if project.repository.gitlab_ci_yml || !project.auto_devops&.previous_changes&.include?('enabled')
project.auto_devops_enabled?
end
private
def add_pages_unique_domain
return unless params.dig(:project_setting_attributes, :pages_unique_domain_enabled)
Gitlab::Pages.add_unique_domain_to(project)
end
def validate!
unless valid_visibility_level_change?(project, project.visibility_attribute_value(params))
raise_validation_error(s_('UpdateProject|New visibility level not allowed!'))
end
validate_default_branch_change
validate_renaming_project_with_tags
end
def validate_default_branch_change
return unless changing_default_branch?
previous_default_branch = project.default_branch
new_default_branch = params[:default_branch]
if project.change_head(new_default_branch)
params[:previous_default_branch] = previous_default_branch
if !project.root_ref?(new_default_branch) && has_custom_head_branch?
raise_validation_error(
format(
s_("UpdateProject|Could not set the default branch. Do you have a branch named 'HEAD' in your repository? (%{linkStart}How do I fix this?%{linkEnd})"),
linkStart: ambiguous_head_documentation_link, linkEnd: '</a>'
).html_safe
)
end
after_default_branch_change(previous_default_branch)
else
raise_validation_error(s_("UpdateProject|Could not set the default branch"))
end
end
def validate_renaming_project_with_tags
return unless renaming_project_with_container_registry_tags?
unless Feature.enabled?(:renaming_project_with_tags, project) &&
ContainerRegistry::GitlabApiClient.supports_gitlab_api?
raise ValidationError, s_('UpdateProject|Cannot rename project because it contains container registry tags!')
end
dry_run = ContainerRegistry::GitlabApiClient.rename_base_repository_path(
project.full_path, name: params[:path], dry_run: true)
return if dry_run == :accepted
log_error("Dry run failed for renaming project with tags: #{project.full_path}, error: #{dry_run}")
raise_validation_error(
format(
s_("UpdateProject|Cannot rename project, the container registry path rename validation failed: %{error}"),
error: dry_run.to_s.titleize
)
)
end
def ambiguous_head_documentation_link
url = Rails.application.routes.url_helpers.help_page_path('user/project/repository/branches/index', anchor: 'error-ambiguous-head-branch-exists')
format('<a href="%{url}" target="_blank" rel="noopener noreferrer">', url: url)
end
# See issue: https://gitlab.com/gitlab-org/gitlab/-/issues/381731
def has_custom_head_branch?
project.repository.branch_names.any? { |name| name.casecmp('head') == 0 }
end
def after_default_branch_change(previous_default_branch)
# overridden by EE module
end
# overridden by EE module
def remove_unallowed_params
params.delete(:emails_enabled) unless can?(current_user, :set_emails_disabled, project)
params.delete(:runner_registration_enabled) if Gitlab::CurrentSettings.valid_runner_registrars.exclude?('project')
end
def after_update
todos_features_changes = %w[
issues_access_level
merge_requests_access_level
repository_access_level
]
project_changed_feature_keys = project.project_feature.previous_changes.keys
if project.visibility_level_previous_changes && project.private?
# don't enqueue immediately to prevent todos removal in case of a mistake
TodosDestroyer::ConfidentialIssueWorker.perform_in(Todo::WAIT_FOR_DELETE, nil, project.id)
TodosDestroyer::ProjectPrivateWorker.perform_in(Todo::WAIT_FOR_DELETE, project.id)
elsif (project_changed_feature_keys & todos_features_changes).present?
TodosDestroyer::PrivateFeaturesWorker.perform_in(Todo::WAIT_FOR_DELETE, project.id)
end
if project.previous_changes.include?('path')
after_rename_service(project).execute
else
system_hook_service.execute_hooks_for(project, :update)
end
update_pending_builds if runners_settings_toggled?
publish_events
end
def after_rename_service(project)
AfterRenameService.new(project, path_before: project.path_before_last_save, full_path_before: project.full_path_before_last_save)
end
def raise_validation_error(message)
raise ValidationError, message
end
def update_failed!
model_errors = project.errors.full_messages.to_sentence
error_message = model_errors.presence || s_('UpdateProject|Project could not be updated!')
error(error_message)
end
def renaming_project_with_container_registry_tags?
new_path = params[:path]
new_path && new_path != project.path &&
project.has_container_registry_tags?
end
def changing_default_branch?
new_branch = params[:default_branch]
new_branch && project.repository.exists? &&
new_branch != project.default_branch
end
def enabling_wiki?
return false if project.wiki_enabled?
params.dig(:project_feature_attributes, :wiki_access_level).to_i > ProjectFeature::DISABLED
end
def ensure_wiki_exists
return if project.create_wiki
log_error("Could not create wiki for #{project.full_name}")
Gitlab::Metrics.counter(:wiki_can_not_be_created_total, 'Counts the times we failed to create a wiki').increment
end
def changing_repository_storage?
new_repository_storage = params[:repository_storage]
new_repository_storage && project.repository.exists? &&
project.repository_storage != new_repository_storage &&
can?(current_user, :change_repository_storage, project)
end
def build_topics
topics = params.delete(:topics)
tag_list = params.delete(:tag_list)
topic_list = topics || tag_list
params[:topic_list] ||= topic_list if topic_list
end
def update_pending_builds
update_params = {
instance_runners_enabled: project.shared_runners_enabled?,
namespace_traversal_ids: group_runner_traversal_ids
}
::Ci::UpdatePendingBuildService
.new(project, update_params)
.execute
end
def shared_runners_settings_toggled?
project.previous_changes.include?(:shared_runners_enabled)
end
def group_runners_settings_toggled?
return false unless project.ci_cd_settings.present?
project.ci_cd_settings.previous_changes.include?(:group_runners_enabled)
end
def runners_settings_toggled?
shared_runners_settings_toggled? || group_runners_settings_toggled?
end
def group_runner_traversal_ids
if project.group_runners_enabled?
project.namespace.traversal_ids
else
[]
end
end
def publish_events
publish_project_archived_event
publish_project_attributed_changed_event
publish_project_features_changed_event
end
def publish_project_archived_event
return unless project.archived_previously_changed?
event = Projects::ProjectArchivedEvent.new(data: {
project_id: @project.id,
namespace_id: @project.namespace_id,
root_namespace_id: @project.root_namespace.id
})
Gitlab::EventStore.publish(event)
end
def publish_project_attributed_changed_event
changes = @project.previous_changes
return if changes.blank?
event = Projects::ProjectAttributesChangedEvent.new(data: {
project_id: @project.id,
namespace_id: @project.namespace_id,
root_namespace_id: @project.root_namespace.id,
attributes: changes.keys
})
Gitlab::EventStore.publish(event)
end
def publish_project_features_changed_event
changes = @project.project_feature.previous_changes
return if changes.blank?
event = Projects::ProjectFeaturesChangedEvent.new(data: {
project_id: @project.id,
namespace_id: @project.namespace_id,
root_namespace_id: @project.root_namespace.id,
features: changes.keys
})
Gitlab::EventStore.publish(event)
end
end
end
Projects::UpdateService.prepend_mod_with('Projects::UpdateService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::UpdateService, feature_category: :groups_and_projects do
include ExternalAuthorizationServiceHelpers
include ProjectForksHelper
let(:user) { create(:user) }
let(:project) do
create(:project, creator: user, namespace: user.namespace)
end
shared_examples 'publishing Projects::ProjectAttributesChangedEvent' do |params:, attributes:|
it "publishes Projects::ProjectAttributesChangedEvent" do
expect { update_project(project, user, params) }
.to publish_event(Projects::ProjectAttributesChangedEvent)
.with(
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id,
attributes: attributes
)
end
end
describe '#execute' do
let(:admin) { create(:admin) }
context 'when changing visibility level' do
it_behaves_like 'publishing Projects::ProjectAttributesChangedEvent',
params: { visibility_level: Gitlab::VisibilityLevel::INTERNAL },
attributes: %w[updated_at visibility_level]
context 'when visibility_level changes to INTERNAL' do
it 'updates the project to internal' do
expect(TodosDestroyer::ProjectPrivateWorker).not_to receive(:perform_in)
result = update_project(project, user, visibility_level: Gitlab::VisibilityLevel::INTERNAL)
expect(result).to eq({ status: :success })
expect(project).to be_internal
end
end
context 'when visibility_level changes to PUBLIC' do
it 'updates the project to public' do
expect(TodosDestroyer::ProjectPrivateWorker).not_to receive(:perform_in)
result = update_project(project, user, visibility_level: Gitlab::VisibilityLevel::PUBLIC)
expect(result).to eq({ status: :success })
expect(project).to be_public
end
context 'and project is PRIVATE' do
it 'does not unlink project from fork network' do
expect(Projects::UnlinkForkService).not_to receive(:new)
update_project(project, user, visibility_level: Gitlab::VisibilityLevel::PUBLIC)
end
end
end
context 'when visibility_level changes to PRIVATE' do
before do
project.update!(visibility_level: Gitlab::VisibilityLevel::PUBLIC)
end
it 'updates the project to private' do
expect(TodosDestroyer::ProjectPrivateWorker).to receive(:perform_in).with(Todo::WAIT_FOR_DELETE, project.id)
expect(TodosDestroyer::ConfidentialIssueWorker).to receive(:perform_in).with(Todo::WAIT_FOR_DELETE, nil, project.id)
result = update_project(project, user, visibility_level: Gitlab::VisibilityLevel::PRIVATE)
expect(result).to eq({ status: :success })
expect(project).to be_private
end
end
context 'when visibility levels are restricted to PUBLIC only' do
before do
stub_application_setting(restricted_visibility_levels: [Gitlab::VisibilityLevel::PUBLIC])
end
context 'when visibility_level is INTERNAL' do
it 'updates the project to internal' do
result = update_project(project, user, visibility_level: Gitlab::VisibilityLevel::INTERNAL)
expect(result).to eq({ status: :success })
expect(project).to be_internal
end
end
context 'when visibility_level is PUBLIC' do
it 'does not update the project to public' do
result = update_project(project, user, visibility_level: Gitlab::VisibilityLevel::PUBLIC)
expect(result).to eq({ status: :error, message: 'New visibility level not allowed!' })
expect(project).to be_private
end
context 'when updated by an admin' do
context 'when admin mode is enabled', :enable_admin_mode do
it 'updates the project to public' do
result = update_project(project, admin, visibility_level: Gitlab::VisibilityLevel::PUBLIC)
expect(result).to eq({ status: :success })
expect(project).to be_public
end
end
context 'when admin mode is disabled' do
it 'does not update the project to public' do
result = update_project(project, admin, visibility_level: Gitlab::VisibilityLevel::PUBLIC)
expect(result).to eq({ status: :error, message: 'New visibility level not allowed!' })
expect(project).to be_private
end
end
end
end
end
context 'when project visibility is higher than parent group' do
let(:group) { create(:group, visibility_level: Gitlab::VisibilityLevel::INTERNAL) }
before do
project.update!(namespace: group, visibility_level: group.visibility_level)
end
it 'does not update project visibility level even if admin', :enable_admin_mode do
result = update_project(project, admin, visibility_level: Gitlab::VisibilityLevel::PUBLIC)
expect(result).to eq({ status: :error, message: 'Visibility level public is not allowed in a internal group.' })
expect(project.reload).to be_internal
end
end
context 'when user is not project owner' do
let_it_be(:maintainer) { create(:user) }
before do
project.add_maintainer(maintainer)
end
context 'when project is private' do
it 'does not update the project to public' do
result = update_project(project, maintainer, visibility_level: Gitlab::VisibilityLevel::PUBLIC)
expect(result).to eq({ status: :error, message: 'New visibility level not allowed!' })
expect(project).to be_private
end
it 'does not update the project to public with tricky value' do
result = update_project(project, maintainer, visibility_level: Gitlab::VisibilityLevel::PUBLIC.to_s + 'r')
expect(result).to eq({ status: :error, message: 'New visibility level not allowed!' })
expect(project).to be_private
end
end
context 'when project is public' do
before do
project.update!(visibility_level: Gitlab::VisibilityLevel::PUBLIC)
end
it 'does not update the project to private' do
result = update_project(project, maintainer, visibility_level: Gitlab::VisibilityLevel::PRIVATE)
expect(result).to eq({ status: :error, message: 'New visibility level not allowed!' })
expect(project).to be_public
end
it 'does not update the project to private with invalid string value' do
result = update_project(project, maintainer, visibility_level: 'invalid')
expect(result).to eq({ status: :error, message: 'New visibility level not allowed!' })
expect(project).to be_public
end
it 'does not update the project to private with valid string value' do
result = update_project(project, maintainer, visibility_level: 'private')
expect(result).to eq({ status: :error, message: 'New visibility level not allowed!' })
expect(project).to be_public
end
# See https://gitlab.com/gitlab-org/gitlab/-/issues/359910
it 'does not update the project to private because of Active Record typecasting' do
result = update_project(project, maintainer, visibility_level: 'public')
expect(result).to eq({ status: :success })
expect(project).to be_public
end
end
end
context 'when updating shared runners' do
context 'can enable shared runners' do
let(:group) { create(:group, shared_runners_enabled: true) }
let(:project) { create(:project, namespace: group, shared_runners_enabled: false) }
it 'enables shared runners' do
result = update_project(project, user, shared_runners_enabled: true)
expect(result).to eq({ status: :success })
expect(project.reload.shared_runners_enabled).to be_truthy
end
end
context 'cannot enable shared runners' do
let(:group) { create(:group, :shared_runners_disabled) }
let(:project) { create(:project, namespace: group, shared_runners_enabled: false) }
it 'does not enable shared runners' do
result = update_project(project, user, shared_runners_enabled: true)
expect(result).to eq({ status: :error, message: 'Shared runners enabled cannot be enabled because parent group does not allow it' })
expect(project.reload.shared_runners_enabled).to be_falsey
end
end
end
end
describe 'when updating project that has forks' do
let(:project) { create(:project, :internal) }
let(:user) { project.first_owner }
let(:forked_project) { fork_project(project) }
it 'does not change visibility of forks' do
opts = { visibility_level: Gitlab::VisibilityLevel::PRIVATE }
expect(project).to be_internal
expect(forked_project).to be_internal
expect(update_project(project, user, opts)).to eq({ status: :success })
expect(project).to be_private
expect(forked_project.reload).to be_internal
end
end
context 'when updating a default branch' do
let(:project) { create(:project, :repository) }
it 'changes default branch, tracking the previous branch' do
previous_default_branch = project.default_branch
update_project(project, admin, default_branch: 'feature')
project.reload
expect(project.default_branch).to eq('feature')
expect(project.previous_default_branch).to eq(previous_default_branch)
update_project(project, admin, default_branch: previous_default_branch)
project.reload
expect(project.default_branch).to eq(previous_default_branch)
expect(project.previous_default_branch).to eq('feature')
end
it 'does not change the default branch' do
# The branch 'unexisted-branch' does not exist.
update_project(project, admin, default_branch: 'unexisted-branch')
project.reload
expect(project.default_branch).to eq 'master'
expect(project.previous_default_branch).to be_nil
end
context 'when repository has an ambiguous branch named "HEAD"' do
before do
allow(project.repository.raw).to receive(:write_ref).and_return(false)
allow(project.repository).to receive(:branch_names) { %w[fix master main HEAD] }
end
it 'returns an error to the user' do
result = update_project(project, admin, default_branch: 'fix')
expect(result).to include(status: :error)
expect(result[:message]).to include("Could not set the default branch. Do you have a branch named 'HEAD' in your repository?")
project.reload
expect(project.default_branch).to eq 'master'
expect(project.previous_default_branch).to be_nil
end
end
end
context 'when we update project but not enabling a wiki' do
it 'does not try to create an empty wiki' do
project.wiki.repository.raw.remove
result = update_project(project, user, { name: 'test1' })
expect(result).to eq({ status: :success })
expect(project.wiki_repository_exists?).to be false
end
it 'handles empty project feature attributes' do
project.project_feature.update!(wiki_access_level: ProjectFeature::DISABLED)
result = update_project(project, user, { name: 'test1' })
expect(result).to eq({ status: :success })
expect(project.wiki_repository_exists?).to be false
end
end
context 'when enabling a wiki' do
it 'creates a wiki' do
project.project_feature.update!(wiki_access_level: ProjectFeature::DISABLED)
project.wiki.repository.raw.remove
result = update_project(project, user, project_feature_attributes: { wiki_access_level: ProjectFeature::ENABLED })
expect(result).to eq({ status: :success })
expect(project.wiki_repository_exists?).to be true
expect(project.wiki_enabled?).to be true
end
it 'logs an error and creates a metric when wiki can not be created' do
project.project_feature.update!(wiki_access_level: ProjectFeature::DISABLED)
expect_next_instance_of(ProjectWiki) do |project_wiki|
expect(project_wiki).to receive(:create_wiki_repository).and_raise(Wiki::CouldNotCreateWikiError)
end
expect_any_instance_of(described_class).to receive(:log_error).with("Could not create wiki for #{project.full_name}")
counter = double(:counter)
expect(Gitlab::Metrics).to receive(:counter).with(:wiki_can_not_be_created_total, 'Counts the times we failed to create a wiki').and_return(counter)
expect(counter).to receive(:increment)
update_project(project, user, project_feature_attributes: { wiki_access_level: ProjectFeature::ENABLED })
end
end
context 'when changing feature visibility to private' do
it 'updates the visibility correctly' do
expect(TodosDestroyer::PrivateFeaturesWorker)
.to receive(:perform_in).with(Todo::WAIT_FOR_DELETE, project.id)
result = update_project(project, user, project_feature_attributes:
{ issues_access_level: ProjectFeature::PRIVATE }
)
expect(result).to eq({ status: :success })
expect(project.project_feature.issues_access_level).to be(ProjectFeature::PRIVATE)
end
end
context 'when changes project features' do
# Using some sample features for testing.
# Not using all the features because some of them must be enabled/disabled together
%w[issues wiki forking model_experiments].each do |feature_name|
context "with feature_name:#{feature_name}" do
let(:feature) { "#{feature_name}_access_level" }
let(:params) do
{ project_feature_attributes: { feature => ProjectFeature::ENABLED } }
end
before do
project.project_feature.update!(feature => ProjectFeature::DISABLED)
end
it 'publishes Projects::ProjectFeaturesChangedEvent' do
expect { update_project(project, user, params) }
.to publish_event(Projects::ProjectFeaturesChangedEvent)
.with(
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id,
features: array_including(feature, "updated_at")
)
end
end
end
end
context 'when archiving a project' do
it_behaves_like 'publishing Projects::ProjectAttributesChangedEvent',
params: { archived: true },
attributes: %w[updated_at archived]
it 'publishes a ProjectArchivedEvent' do
expect { update_project(project, user, archived: true) }
.to publish_event(Projects::ProjectArchivedEvent)
.with(
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id
)
end
end
context 'when changing operations feature visibility' do
let(:feature_params) { { operations_access_level: ProjectFeature::DISABLED } }
it 'does not sync the changes to the related fields' do
result = update_project(project, user, project_feature_attributes: feature_params)
expect(result).to eq({ status: :success })
feature = project.project_feature
expect(feature.operations_access_level).to eq(ProjectFeature::DISABLED)
expect(feature.monitor_access_level).not_to eq(ProjectFeature::DISABLED)
expect(feature.infrastructure_access_level).not_to eq(ProjectFeature::DISABLED)
expect(feature.feature_flags_access_level).not_to eq(ProjectFeature::DISABLED)
expect(feature.environments_access_level).not_to eq(ProjectFeature::DISABLED)
end
end
context 'when updating a project that contains container images' do
let(:new_name) { 'renamed' }
before do
stub_container_registry_config(enabled: true)
stub_container_registry_tags(repository: /image/, tags: %w[rc1])
create(:container_repository, project: project, name: :image)
end
shared_examples 'renaming the project fails with message' do |error_message|
it 'does not allow to rename the project' do
result = update_project(project, admin, path: new_name)
expect(result).to include(status: :error)
expect(result[:message]).to match(error_message)
end
end
context 'when feature renaming_project_with_tags is disabled' do
before do
stub_feature_flags(renaming_project_with_tags: false)
end
it_behaves_like 'renaming the project fails with message', /contains container registry tags/
end
context "when the GitlabAPI is not supported" do
before do
allow(ContainerRegistry::GitlabApiClient).to receive(:supports_gitlab_api?).and_return(false)
end
it_behaves_like 'renaming the project fails with message', /contains container registry tags/
end
context 'when Gitlab API is supported' do
before do
allow(ContainerRegistry::GitlabApiClient).to receive(:supports_gitlab_api?).and_return(true)
end
it 'executes a dry run of the project rename' do
stub_rename_base_repository_in_registry(dry_run: true)
update_project(project, admin, path: new_name)
expect_rename_of_base_repository_in_registry(dry_run: true)
end
context 'when the dry run fails' do
before do
stub_rename_base_repository_in_registry(dry_run: true, result: :bad_request)
end
it_behaves_like 'renaming the project fails with message', /container registry path rename validation failed/
it 'logs the error' do
expect_any_instance_of(described_class).to receive(:log_error).with("Dry run failed for renaming project with tags: #{project.full_path}, error: bad_request")
update_project(project, admin, path: new_name)
end
end
context 'when the dry run succeeds' do
before do
stub_rename_base_repository_in_registry(dry_run: true, result: :accepted)
end
it 'continues with the project rename' do
stub_rename_base_repository_in_registry(dry_run: false, result: :ok)
old_project_full_path = project.full_path
update_project(project, admin, path: new_name)
expect_rename_of_base_repository_in_registry(dry_run: true, path: old_project_full_path)
expect_rename_of_base_repository_in_registry(dry_run: false, path: old_project_full_path)
end
end
def stub_rename_base_repository_in_registry(dry_run:, result: nil)
options = { name: new_name }
options[:dry_run] = true if dry_run
allow(ContainerRegistry::GitlabApiClient)
.to receive(:rename_base_repository_path)
.with(project.full_path, options)
.and_return(result)
end
def expect_rename_of_base_repository_in_registry(dry_run:, path: nil)
options = { name: new_name }
options[:dry_run] = true if dry_run
expect(ContainerRegistry::GitlabApiClient)
.to have_received(:rename_base_repository_path)
.with(path || project.full_path, options)
end
end
it 'allows to update other settings' do
result = update_project(project, admin, public_builds: true)
expect(result[:status]).to eq :success
expect(project.reload.public_builds).to be true
end
end
context 'when renaming a project' do
let(:raw_fake_repo) { Gitlab::Git::Repository.new('default', File.join(user.namespace.full_path, 'existing.git'), nil, nil) }
context 'with legacy storage' do
let(:project) { create(:project, :legacy_storage, :repository, creator: user, namespace: user.namespace) }
before do
raw_fake_repo.create_repository
end
after do
raw_fake_repo.remove
end
it 'does not allow renaming when new path matches existing repository on disk' do
result = update_project(project, admin, path: 'existing')
expect(result).to include(status: :error)
expect(result[:message]).to match('There is already a repository with that name on disk')
expect(project).not_to be_valid
expect(project.errors.messages).to have_key(:base)
expect(project.errors.messages[:base]).to include('There is already a repository with that name on disk')
end
context 'when hashed storage is enabled' do
before do
stub_application_setting(hashed_storage_enabled: true)
end
it 'migrates project to a hashed storage instead of renaming the repo to another legacy name' do
result = update_project(project, admin, path: 'new-path')
expect(result).not_to include(status: :error)
expect(project).to be_valid
expect(project.errors).to be_empty
expect(project.reload.hashed_storage?(:repository)).to be_truthy
end
end
end
context 'with hashed storage' do
let(:project) { create(:project, :repository, creator: user, namespace: user.namespace) }
before do
stub_application_setting(hashed_storage_enabled: true)
end
it 'does not check if new path matches existing repository on disk' do
expect(project).not_to receive(:repository_with_same_path_already_exists?)
result = update_project(project, admin, path: 'existing')
expect(result).to include(status: :success)
end
end
end
context 'when passing invalid parameters' do
it 'returns an error result when record cannot be updated' do
result = update_project(project, admin, { name: 'foo&bar' })
expect(result).to eq({
status: :error,
message: "Name can contain only letters, digits, emoji, '_', '.', '+', dashes, or spaces. It must start with a letter, digit, emoji, or '_'."
})
end
end
context 'when updating #emails_enabled' do
it 'updates the attribute for the project owner' do
expect { update_project(project, user, emails_enabled: false) }
.to change { project.emails_enabled }
.to(false)
end
it 'does not update when not project owner' do
maintainer = create(:user)
project.add_member(maintainer, :maintainer)
expect { update_project(project, maintainer, emails_enabled: false) }
.not_to change { project.emails_enabled }
end
end
context 'when updating #runner_registration_enabled' do
it 'updates the attribute' do
expect { update_project(project, user, runner_registration_enabled: false) }
.to change { project.runner_registration_enabled }
.to(false)
end
context 'when runner registration is disabled for all projects' do
before do
stub_application_setting(valid_runner_registrars: [])
end
it 'restricts updating the attribute' do
expect { update_project(project, user, runner_registration_enabled: false) }
.not_to change { project.runner_registration_enabled }
end
end
end
context 'when updating runners settings' do
let(:settings) do
{ instance_runners_enabled: true, namespace_traversal_ids: [123] }
end
let!(:pending_build) do
create(:ci_pending_build, project: project, **settings)
end
context 'when project has shared runners enabled' do
let(:project) { create(:project, shared_runners_enabled: true) }
it 'updates builds queue when shared runners get disabled' do
expect { update_project(project, admin, shared_runners_enabled: false) }
.to change { pending_build.reload.instance_runners_enabled }.to(false)
expect(pending_build.reload.instance_runners_enabled).to be false
end
end
context 'when project has shared runners disabled' do
let(:project) { create(:project, shared_runners_enabled: false) }
it 'updates builds queue when shared runners get enabled' do
expect { update_project(project, admin, shared_runners_enabled: true) }
.to not_change { pending_build.reload.instance_runners_enabled }
expect(pending_build.reload.instance_runners_enabled).to be true
end
end
context 'when project has group runners enabled' do
let(:project) { create(:project, group_runners_enabled: true) }
before do
project.ci_cd_settings.update!(group_runners_enabled: true)
end
it 'updates builds queue when group runners get disabled' do
update_project(project, admin, group_runners_enabled: false)
expect(pending_build.reload.namespace_traversal_ids).to be_empty
end
end
context 'when project has group runners disabled' do
let(:project) { create(:project, :in_subgroup, group_runners_enabled: false) }
before do
project.reload.ci_cd_settings.update!(group_runners_enabled: false)
end
it 'updates builds queue when group runners get enabled' do
update_project(project, admin, group_runners_enabled: true)
expect(pending_build.reload.namespace_traversal_ids).to include(project.namespace.id)
end
end
end
context 'with external authorization enabled' do
before do
enable_external_authorization_service_check
allow(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(user, 'default_label', project.full_path).and_call_original
end
it 'does not save the project with an error if the service denies access' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(user, 'new-label') { false }
result = update_project(project, user, { external_authorization_classification_label: 'new-label' })
expect(result[:message]).to be_present
expect(result[:status]).to eq(:error)
end
it 'saves the new label if the service allows access' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(user, 'new-label') { true }
result = update_project(project, user, { external_authorization_classification_label: 'new-label' })
expect(result[:status]).to eq(:success)
expect(project.reload.external_authorization_classification_label).to eq('new-label')
end
it 'checks the default label when the classification label was cleared' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(user, 'default_label') { true }
update_project(project, user, { external_authorization_classification_label: '' })
end
it 'does not check the label when it does not change' do
expect(::Gitlab::ExternalAuthorization).to receive(:access_allowed?).once
update_project(project, user, { name: 'New name' })
end
end
context 'when updating nested attributes for prometheus integration' do
context 'prometheus integration exists' do
let(:prometheus_integration_attributes) do
attributes_for(
:prometheus_integration,
project: project,
properties: { api_url: "http://new.prometheus.com", manual_configuration: "0" }
)
end
let!(:prometheus_integration) do
create(
:prometheus_integration,
project: project,
properties: { api_url: "http://old.prometheus.com", manual_configuration: "0" }
)
end
it 'updates existing record' do
expect { update_project(project, user, prometheus_integration_attributes: prometheus_integration_attributes) }
.to change { prometheus_integration.reload.api_url }
.from("http://old.prometheus.com")
.to("http://new.prometheus.com")
end
end
context 'prometheus integration does not exist' do
context 'valid parameters' do
let(:prometheus_integration_attributes) do
attributes_for(
:prometheus_integration,
project: project,
properties: { api_url: "http://example.prometheus.com", manual_configuration: "0" }
)
end
it 'creates new record' do
expect { update_project(project, user, prometheus_integration_attributes: prometheus_integration_attributes) }
.to change { ::Integrations::Prometheus.where(project: project).count }
.from(0)
.to(1)
end
end
context 'invalid parameters' do
let(:prometheus_integration_attributes) do
attributes_for(
:prometheus_integration,
project: project,
properties: { api_url: 'invalid-url', manual_configuration: "1" }
)
end
it 'does not create new record' do
expect { update_project(project, user, prometheus_integration_attributes: prometheus_integration_attributes) }
.not_to change { ::Integrations::Prometheus.where(project: project).count }
end
end
end
end
describe 'when changing repository_storage' do
let(:repository_read_only) { false }
let(:project) { create(:project, :repository, repository_read_only: repository_read_only) }
let(:opts) { { repository_storage: 'test_second_storage' } }
before do
stub_storage_settings('test_second_storage' => {})
end
shared_examples 'the transfer was not scheduled' do
it 'does not schedule the transfer' do
expect do
update_project(project, user, opts)
end.not_to change(project.repository_storage_moves, :count)
end
end
context 'authenticated as admin' do
let(:user) { create(:admin) }
context 'when admin mode is enabled', :enable_admin_mode do
it 'schedules the transfer of the repository to the new storage and locks the project' do
update_project(project, admin, opts)
expect(project).to be_repository_read_only
expect(project.repository_storage_moves.last).to have_attributes(
state: ::Projects::RepositoryStorageMove.state_machines[:state].states[:scheduled].value,
source_storage_name: 'default',
destination_storage_name: 'test_second_storage'
)
end
end
context 'when admin mode is disabled' do
it_behaves_like 'the transfer was not scheduled'
end
context 'the repository is read-only' do
let(:repository_read_only) { true }
it_behaves_like 'the transfer was not scheduled'
end
context 'the storage has not changed' do
let(:opts) { { repository_storage: 'default' } }
it_behaves_like 'the transfer was not scheduled'
end
context 'the storage does not exist' do
let(:opts) { { repository_storage: 'nonexistent' } }
it_behaves_like 'the transfer was not scheduled'
end
end
context 'authenticated as user' do
let(:user) { create(:user) }
it_behaves_like 'the transfer was not scheduled'
end
end
describe 'when updating topics' do
let(:project) { create(:project, topic_list: 'topic1, topic2') }
it 'updates using topics' do
result = update_project(project, user, { topics: 'topics' })
expect(result[:status]).to eq(:success)
expect(project.topic_list).to eq(%w[topics])
end
it 'updates using topic_list' do
result = update_project(project, user, { topic_list: 'topic_list' })
expect(result[:status]).to eq(:success)
expect(project.topic_list).to eq(%w[topic_list])
end
it 'updates using tag_list (deprecated)' do
result = update_project(project, user, { tag_list: 'tag_list' })
expect(result[:status]).to eq(:success)
expect(project.topic_list).to eq(%w[tag_list])
end
end
describe 'when updating pages unique domain', feature_category: :pages do
before do
stub_pages_setting(enabled: true)
end
context 'when turning it on' do
it 'adds pages unique domain' do
expect(Gitlab::Pages).to receive(:add_unique_domain_to)
expect { update_project(project, user, project_setting_attributes: { pages_unique_domain_enabled: true }) }
.to change { project.project_setting.pages_unique_domain_enabled }
.from(false).to(true)
end
end
context 'when turning it off' do
it 'does not add pages unique domain' do
expect(Gitlab::Pages).not_to receive(:add_unique_domain_to)
expect { update_project(project, user, project_setting_attributes: { pages_unique_domain_enabled: false }) }
.not_to change { project.project_setting.pages_unique_domain_enabled }
end
end
end
end
describe '#run_auto_devops_pipeline?' do
subject { described_class.new(project, user).run_auto_devops_pipeline? }
context 'when master contains a .gitlab-ci.yml file' do
before do
allow(project.repository).to receive(:gitlab_ci_yml).and_return("script: ['test']")
end
it { is_expected.to eq(false) }
end
context 'when auto devops is nil' do
it { is_expected.to eq(false) }
end
context 'when auto devops is explicitly enabled' do
before do
project.create_auto_devops!(enabled: true)
end
it { is_expected.to eq(true) }
end
context 'when auto devops is explicitly disabled' do
before do
project.create_auto_devops!(enabled: false)
end
it { is_expected.to eq(false) }
end
context 'when auto devops is set to instance setting' do
before do
project.create_auto_devops!(enabled: nil)
project.reload
allow(project.auto_devops).to receive(:previous_changes).and_return('enabled' => true)
end
context 'when auto devops is enabled system-wide' do
before do
stub_application_setting(auto_devops_enabled: true)
end
it { is_expected.to eq(true) }
end
context 'when auto devops is disabled system-wide' do
before do
stub_application_setting(auto_devops_enabled: false)
end
it { is_expected.to eq(false) }
end
end
end
def update_project(project, user, opts)
described_class.new(project, user, opts).execute
end
end
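As a usage note, the optional block passed to `execute` runs after validation but before the save, so a caller can veto the update by adding errors. A minimal sketch, assuming `project` and `user` are in scope and using `veto?` as a placeholder condition:

```ruby
# `project` and `user` are assumed to exist; `veto?` is a placeholder.
result = Projects::UpdateService.new(project, user, name: 'Renamed').execute do
  # Errors added here abort the save and make the service return an error hash
  project.errors.add(:base, 'update vetoed') if veto?
end
result[:status] # => :success or :error
```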
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class EnableDeployKeyService < BaseService
def execute
key_id = params[:key_id] || params[:id]
key = find_accessible_key(key_id)
return unless key
unless project.deploy_keys.include?(key)
project.deploy_keys << key
end
key
end
private
def find_accessible_key(key_id)
if current_user.admin?
DeployKey.find_by_id(key_id)
else
current_user.accessible_deploy_keys.find_by_id(key_id)
end
end
end
end
Projects::EnableDeployKeyService.prepend_mod_with('Projects::EnableDeployKeyService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::EnableDeployKeyService, feature_category: :continuous_delivery do
let(:deploy_key) { create(:deploy_key, public: true) }
let(:project) { create(:project) }
let(:user) { project.creator }
let!(:params) { { key_id: deploy_key.id } }
it 'enables the key' do
expect do
service.execute
end.to change { project.deploy_keys.count }.from(0).to(1)
end
context 'when trying to add an inaccessible key' do
let(:another_key) { create(:another_key) }
let!(:params) { { key_id: another_key.id } }
it 'returns nil if the key cannot be added' do
expect(service.execute).to be nil
end
end
context 'when adding the same key twice' do
before do
project.deploy_keys << deploy_key
end
it 'returns existing key' do
expect(service.execute).to eq(deploy_key)
end
end
def service
Projects::EnableDeployKeyService.new(project, user, params)
end
end
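A short usage sketch, with assumed objects: the service returns the key on success and `nil` when the key is not accessible to the user:

```ruby
# `project`, `user`, and `deploy_key` are assumed to exist.
key = Projects::EnableDeployKeyService
  .new(project, user, key_id: deploy_key.id)
  .execute

if key.nil?
  # the user could not access the key (and is not an admin)
end
```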
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class MoveLfsObjectsProjectsService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
Project.transaction do
move_lfs_objects_projects
remove_remaining_lfs_objects_project if remove_remaining_elements
success
end
end
private
def move_lfs_objects_projects
non_existent_lfs_objects_projects.update_all(project_id: @project.id)
end
def remove_remaining_lfs_objects_project
source_project.lfs_objects_projects.destroy_all # rubocop: disable Cop/DestroyAll
end
# rubocop: disable CodeReuse/ActiveRecord
def non_existent_lfs_objects_projects
source_project.lfs_objects_projects.where.not(lfs_object: @project.lfs_objects)
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveLfsObjectsProjectsService, feature_category: :source_code_management do
let!(:user) { create(:user) }
let!(:project_with_lfs_objects) { create(:project, namespace: user.namespace) }
let!(:target_project) { create(:project, namespace: user.namespace) }
subject { described_class.new(target_project, user) }
before do
create_list(:lfs_objects_project, 3, project: project_with_lfs_objects)
end
describe '#execute' do
it 'links the lfs objects that exist in the source project' do
expect(target_project.lfs_objects.count).to eq 0
subject.execute(project_with_lfs_objects)
expect(project_with_lfs_objects.reload.lfs_objects.count).to eq 0
expect(target_project.reload.lfs_objects.count).to eq 3
end
it 'does not link lfs objects that already exist in the target project' do
target_project.lfs_objects << project_with_lfs_objects.lfs_objects.first(2)
expect(target_project.lfs_objects.count).to eq 2
subject.execute(project_with_lfs_objects)
expect(target_project.lfs_objects.count).to eq 3
end
it 'rolls back changes if the transaction fails' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_lfs_objects) }.to raise_error(StandardError)
expect(project_with_lfs_objects.lfs_objects.count).to eq 3
expect(target_project.lfs_objects.count).to eq 0
end
context 'when remove_remaining_elements is false' do
let(:options) { { remove_remaining_elements: false } }
it 'does not remove remaining lfs objects' do
target_project.lfs_objects << project_with_lfs_objects.lfs_objects.first(2)
subject.execute(project_with_lfs_objects, **options)
expect(project_with_lfs_objects.lfs_objects.count).not_to eq 0
end
end
end
end
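For illustration, the core of the move is a single bulk UPDATE. A sketch of the statement the service issues, assuming `source_project` and `target_project` objects; because `update_all` skips callbacks and validations, wrapping it in a transaction with the cleanup step is what keeps the data consistent:

```ruby
# Equivalent core operation, for illustration only:
source_project.lfs_objects_projects
  .where.not(lfs_object: target_project.lfs_objects)
  .update_all(project_id: target_project.id)
# => one UPDATE "lfs_objects_projects" SET "project_id" = ... statement
```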
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class to detect target platforms of a project made for the Apple
# Ecosystem.
#
# This service searches project.pbxproj and *.xcconfig files (contains build
# settings) for the string "SDKROOT = <SDK_name>" where SDK_name can be
# 'iphoneos', 'macosx', 'appletvos' or 'watchos'. Currently, the service is
# intentionally limited (for performance reasons) to detect if a project
# targets iOS.
#
# Ref: https://developer.apple.com/documentation/xcode/build-settings-reference/
#
# Example usage:
# > AppleTargetPlatformDetectorService.new(a_project).execute
# => []
# > AppleTargetPlatformDetectorService.new(an_ios_project).execute
# => [:ios]
# > AppleTargetPlatformDetectorService.new(multiplatform_project).execute
# => [:ios, :osx, :tvos, :watchos]
class AppleTargetPlatformDetectorService < BaseService
BUILD_CONFIG_FILENAMES = %w[project.pbxproj *.xcconfig].freeze
# For the current iteration, we only want to detect when the project targets
# iOS. In the future, we can use the same logic to detect projects that
# target OSX, TvOS, and WatchOS platforms with SDK names 'macosx', 'appletvos',
# and 'watchos', respectively.
PLATFORM_SDK_NAMES = { ios: 'iphoneos' }.freeze
def execute
detect_platforms
end
private
def file_finder
@file_finder ||= ::Gitlab::FileFinder.new(project, project.default_branch)
end
def detect_platforms
# Return array of SDK names for which "SDKROOT = <sdk_name>" setting
# definition can be found in either project.pbxproj or *.xcconfig files.
PLATFORM_SDK_NAMES.select do |_, sdk|
config_files_containing_sdk_setting(sdk).present?
end.keys
end
# Return array of project.pbxproj and/or *.xcconfig files
# (Gitlab::Search::FoundBlob) that contain the setting definition string
# "SDKROOT = <sdk_name>"
def config_files_containing_sdk_setting(sdk)
BUILD_CONFIG_FILENAMES.map do |filename|
file_finder.find("SDKROOT = #{sdk} filename:#{filename}")
end.flatten
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::AppleTargetPlatformDetectorService, feature_category: :groups_and_projects do
let_it_be(:project) { build(:project) }
subject { described_class.new(project).execute }
context 'when project is not an xcode project' do
before do
allow(Gitlab::FileFinder).to receive(:new) { instance_double(Gitlab::FileFinder, find: []) }
end
it 'returns an empty array' do
is_expected.to match_array []
end
end
context 'when project is an xcode project' do
using RSpec::Parameterized::TableSyntax
let(:finder) { instance_double(Gitlab::FileFinder) }
before do
allow(Gitlab::FileFinder).to receive(:new) { finder }
end
def search_query(sdk, filename)
"SDKROOT = #{sdk} filename:#{filename}"
end
context 'when setting string is found' do
where(:sdk, :filename, :result) do
'iphoneos' | 'project.pbxproj' | [:ios]
'iphoneos' | '*.xcconfig' | [:ios]
end
with_them do
before do
allow(finder).to receive(:find).with(anything) { [] }
allow(finder).to receive(:find).with(search_query(sdk, filename)) { [instance_double(Gitlab::Search::FoundBlob)] }
end
it 'returns an array of unique detected targets' do
is_expected.to match_array result
end
end
end
context 'when setting string is not found' do
before do
allow(finder).to receive(:find).with(anything) { [] }
end
it 'returns an empty array' do
is_expected.to match_array []
end
end
end
end
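The stubbed query strings mirror the real search. A console sketch of the underlying `Gitlab::FileFinder` call, assuming a project whose default branch contains an Xcode build configuration:

```ruby
finder = Gitlab::FileFinder.new(project, project.default_branch)
# `filename:` scopes the blob search to matching paths:
blobs = finder.find('SDKROOT = iphoneos filename:project.pbxproj')
blobs.empty? # => true unless the setting string occurs in that file
```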
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class RecordTargetPlatformsService < BaseService
include Gitlab::Utils::StrongMemoize
def initialize(project, detector_service)
@project = project
@detector_service = detector_service
end
def execute
record_target_platforms
end
private
attr_reader :project, :detector_service
def target_platforms
strong_memoize(:target_platforms) do
Array(detector_service.new(project).execute)
end
end
def record_target_platforms
return unless target_platforms.present?
project_setting.target_platforms = target_platforms
project_setting.save
project_setting.target_platforms
end
def project_setting
@project_setting ||= ::ProjectSetting.find_or_initialize_by(project: project) # rubocop:disable CodeReuse/ActiveRecord
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::RecordTargetPlatformsService, '#execute', feature_category: :groups_and_projects do
let_it_be(:project) { create(:project) }
let(:detector_service) { Projects::AppleTargetPlatformDetectorService }
subject(:execute) { described_class.new(project, detector_service).execute }
context 'when project is an XCode project' do
def project_setting
ProjectSetting.find_by_project_id(project.id)
end
before do
double = instance_double(detector_service, execute: [:ios, :osx])
allow(Projects::AppleTargetPlatformDetectorService).to receive(:new) { double }
end
it 'creates a new setting record for the project', :aggregate_failures do
expect { execute }.to change { ProjectSetting.count }.from(0).to(1)
expect(ProjectSetting.last.target_platforms).to match_array(%w[ios osx])
end
it 'returns array of detected target platforms' do
expect(execute).to match_array %w[ios osx]
end
context 'when a project has an existing setting record' do
before do
create(:project_setting, project: project, target_platforms: saved_target_platforms)
end
context 'when target platforms changed' do
let(:saved_target_platforms) { %w[tvos] }
it 'updates' do
expect { execute }.to change { project_setting.target_platforms }.from(%w[tvos]).to(%w[ios osx])
end
it { is_expected.to match_array %w[ios osx] }
end
context 'when target platforms are the same' do
let(:saved_target_platforms) { %w[osx ios] }
it 'does not update' do
expect { execute }.not_to change { project_setting.updated_at }
end
end
end
end
context 'when project is not an XCode project' do
before do
double = instance_double(Projects::AppleTargetPlatformDetectorService, execute: [])
allow(Projects::AppleTargetPlatformDetectorService).to receive(:new).with(project) { double }
end
it 'does nothing' do
expect { execute }.not_to change { ProjectSetting.count }
end
it { is_expected.to be_nil }
end
end
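Because the detector is injected, the service can be reused with any class that honors the `new(project).execute` contract. A hedged sketch with a hypothetical substitute detector:

```ruby
# Hypothetical substitute detector: anything with this contract works.
class NullDetectorService
  def initialize(project); end

  def execute
    [] # no platforms detected, so the service persists nothing
  end
end

Projects::RecordTargetPlatformsService.new(project, NullDetectorService).execute
# => nil (returns the saved platforms array when detection succeeds)
```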
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Service class for counting and caching the number of open issues of a
# project.
class OpenIssuesCountService < Projects::CountService
include Gitlab::Utils::StrongMemoize
# Cache keys used to store issues count
PUBLIC_COUNT_KEY = 'public_open_issues_count'
TOTAL_COUNT_KEY = 'total_open_issues_count'
def initialize(project, user = nil)
@user = user
super(project)
end
def cache_key_name
public_only? ? PUBLIC_COUNT_KEY : TOTAL_COUNT_KEY
end
def public_only?
!user_is_at_least_reporter?
end
def user_is_at_least_reporter?
strong_memoize(:user_is_at_least_reporter) do
@project.member?(@user, Gitlab::Access::REPORTER)
end
end
def relation_for_count
self.class.query(@project, public_only: public_only?)
end
def public_count_cache_key
cache_key(PUBLIC_COUNT_KEY)
end
def total_count_cache_key
cache_key(TOTAL_COUNT_KEY)
end
# rubocop: disable CodeReuse/ActiveRecord
def refresh_cache(&block)
count_grouped_by_confidential = self.class.query(@project, public_only: false).group(:confidential).count
public_count = count_grouped_by_confidential[false] || 0
total_count = public_count + (count_grouped_by_confidential[true] || 0)
update_cache_for_key(public_count_cache_key) do
public_count
end
update_cache_for_key(total_count_cache_key) do
total_count
end
end
# We only show issues count including confidential for reporters, who are allowed to view confidential issues.
# This will still show a discrepancy on issues number but should be less than before.
# Check https://gitlab.com/gitlab-org/gitlab-foss/issues/38418 description.
# rubocop: disable CodeReuse/ActiveRecord
def self.query(projects, public_only: true)
open_issues = Issue.opened
if public_only
open_issues.public_only.where(project: projects)
else
open_issues.where(project: projects)
end
end
# rubocop: enable CodeReuse/ActiveRecord
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::OpenIssuesCountService, :use_clean_rails_memory_store_caching, feature_category: :team_planning do
let(:project) { create(:project) }
subject { described_class.new(project) }
it_behaves_like 'a counter caching service'
describe '#count' do
context 'when user is nil' do
it 'does not include confidential issues in the issue count' do
create(:issue, :opened, project: project)
create(:issue, :opened, confidential: true, project: project)
expect(described_class.new(project).count).to eq(1)
end
end
context 'when user is provided' do
let(:user) { create(:user) }
context 'when user can read confidential issues' do
before do
project.add_reporter(user)
end
it 'returns the right count with confidential issues' do
create(:issue, :opened, project: project)
create(:issue, :opened, confidential: true, project: project)
expect(described_class.new(project, user).count).to eq(2)
end
it 'uses total_open_issues_count cache key' do
expect(described_class.new(project, user).cache_key_name).to eq('total_open_issues_count')
end
end
context 'when user cannot read confidential issues' do
before do
project.add_guest(user)
end
it 'does not include confidential issues' do
create(:issue, :opened, project: project)
create(:issue, :opened, confidential: true, project: project)
expect(described_class.new(project, user).count).to eq(1)
end
it 'uses public_open_issues_count cache key' do
expect(described_class.new(project, user).cache_key_name).to eq('public_open_issues_count')
end
end
end
describe '#refresh_cache' do
before do
create(:issue, :opened, project: project)
create(:issue, :opened, project: project)
create(:issue, :opened, confidential: true, project: project)
end
context 'when cache is empty' do
it 'refreshes cache keys correctly' do
subject.refresh_cache
expect(Rails.cache.read(subject.cache_key(described_class::PUBLIC_COUNT_KEY))).to eq(2)
expect(Rails.cache.read(subject.cache_key(described_class::TOTAL_COUNT_KEY))).to eq(3)
end
end
context 'when cache is outdated' do
before do
subject.refresh_cache
end
it 'refreshes cache keys correctly' do
create(:issue, :opened, project: project)
create(:issue, :opened, confidential: true, project: project)
subject.refresh_cache
expect(Rails.cache.read(subject.cache_key(described_class::PUBLIC_COUNT_KEY))).to eq(3)
expect(Rails.cache.read(subject.cache_key(described_class::TOTAL_COUNT_KEY))).to eq(5)
end
end
end
end
end
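A short usage sketch of the two cache paths, assuming `project`, `guest`, and `reporter` objects:

```ruby
Projects::OpenIssuesCountService.new(project, guest).count    # public_open_issues_count
Projects::OpenIssuesCountService.new(project, reporter).count # total_open_issues_count

# One grouped query refreshes both cached values at once:
Projects::OpenIssuesCountService.new(project).refresh_cache
```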
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
# Service class for getting and caching the number of issues of several projects
# Warning: do not use this service with a really large set of projects
# because the service uses maps to retrieve the project ids
module Projects
class BatchOpenIssuesCountService < Projects::BatchCountService
# rubocop: disable CodeReuse/ActiveRecord
def global_count
@global_count ||= count_service.query(project_ids).group(:project_id).count
end
# rubocop: enable CodeReuse/ActiveRecord
def count_service
::Projects::OpenIssuesCountService
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::BatchOpenIssuesCountService, feature_category: :groups_and_projects do
let!(:project_1) { create(:project) }
let!(:project_2) { create(:project) }
let(:subject) { described_class.new([project_1, project_2]) }
describe '#refresh_cache_and_retrieve_data', :use_clean_rails_memory_store_caching do
before do
create(:issue, project: project_1)
create(:issue, project: project_1, confidential: true)
create(:issue, project: project_2)
create(:issue, project: project_2, confidential: true)
end
context 'when cache is clean' do
it 'refreshes cache keys correctly' do
subject.refresh_cache_and_retrieve_data
# It does not update total issues cache
expect(Rails.cache.read(get_cache_key(subject, project_1))).to eq(nil)
expect(Rails.cache.read(get_cache_key(subject, project_2))).to eq(nil)
expect(Rails.cache.read(get_cache_key(subject, project_1, true))).to eq(1)
expect(Rails.cache.read(get_cache_key(subject, project_2, true))).to eq(1)
end
end
end
def get_cache_key(subject, project, public_key = false)
service = subject.count_service.new(project)
if public_key
service.cache_key(service.class::PUBLIC_COUNT_KEY)
else
service.cache_key(service.class::TOTAL_COUNT_KEY)
end
end
end
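For illustration, the batch service boils down to one grouped count plus per-project cache writes. A sketch of the grouped query, assuming the relation built by `OpenIssuesCountService.query` with its default `public_only: true`:

```ruby
# Roughly what `global_count` executes for the given projects:
Issue.opened.public_only
     .where(project: [project_1, project_2])
     .group(:project_id).count
# => { project_1.id => 1, project_2.id => 1 } for the data seeded above
```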
|
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class GitDeduplicationService < BaseService
include ExclusiveLeaseGuard
LEASE_TIMEOUT = 86400
delegate :pool_repository, to: :project
attr_reader :project
def initialize(project)
@project = project
end
def execute
try_obtain_lease do
unless project.has_pool_repository?
disconnect_git_alternates
break
end
if source_project? && pool_can_fetch_from_source?
fetch_from_source
end
project.link_pool_repository if same_storage_as_pool?(project.repository)
end
end
private
def disconnect_git_alternates
project.repository.disconnect_alternates
end
def pool_can_fetch_from_source?
project.git_objects_poolable? &&
same_storage_as_pool?(pool_repository.source_project.repository)
end
def same_storage_as_pool?(repository)
pool_repository.object_pool.repository.storage == repository.storage
end
def fetch_from_source
project.pool_repository.object_pool.fetch
end
def source_project?
return unless project.has_pool_repository?
project.pool_repository.source_project == project
end
def lease_timeout
LEASE_TIMEOUT
end
def lease_key
"git_deduplication:#{project.id}"
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::GitDeduplicationService, feature_category: :source_code_management do
include ExclusiveLeaseHelpers
let(:pool) { create(:pool_repository, :ready) }
let(:project) { create(:project, :repository) }
let(:lease_key) { "git_deduplication:#{project.id}" }
let(:lease_timeout) { Projects::GitDeduplicationService::LEASE_TIMEOUT }
subject(:service) { described_class.new(project) }
describe '#execute' do
context 'when there is not already a lease' do
context 'when the project does not have a pool repository' do
it 'calls disconnect_git_alternates' do
stub_exclusive_lease(lease_key, timeout: lease_timeout)
expect(project.repository).to receive(:disconnect_alternates)
service.execute
end
end
context 'when the project has a pool repository' do
let(:project) { create(:project, :repository, pool_repository: pool) }
context 'when the project is a source project' do
let(:lease_key) { "git_deduplication:#{pool.source_project.id}" }
subject(:service) { described_class.new(pool.source_project) }
it 'calls fetch' do
stub_exclusive_lease(lease_key, timeout: lease_timeout)
allow(pool.source_project).to receive(:git_objects_poolable?).and_return(true)
expect(pool.object_pool).to receive(:fetch)
service.execute
end
it 'does not call fetch if git objects are not poolable' do
stub_exclusive_lease(lease_key, timeout: lease_timeout)
allow(pool.source_project).to receive(:git_objects_poolable?).and_return(false)
expect(pool.object_pool).not_to receive(:fetch)
service.execute
end
it 'does not call fetch if pool and project are not on the same storage' do
stub_exclusive_lease(lease_key, timeout: lease_timeout)
allow(pool.source_project.repository).to receive(:storage).and_return('special_storage_001')
expect(pool.object_pool).not_to receive(:fetch)
service.execute
end
context 'when visibility level of the project' do
before do
allow(pool.source_project).to receive(:repository_access_level).and_return(ProjectFeature::ENABLED)
end
context 'is private' do
it 'does not call fetch' do
allow(pool.source_project).to receive(:visibility_level).and_return(Gitlab::VisibilityLevel::PRIVATE)
expect(pool.object_pool).not_to receive(:fetch)
service.execute
end
end
context 'is public' do
it 'calls fetch' do
allow(pool.source_project).to receive(:visibility_level).and_return(Gitlab::VisibilityLevel::PUBLIC)
expect(pool.object_pool).to receive(:fetch)
service.execute
end
end
context 'is internal' do
it 'calls fetch' do
allow(pool.source_project).to receive(:visibility_level).and_return(Gitlab::VisibilityLevel::INTERNAL)
expect(pool.object_pool).to receive(:fetch)
service.execute
end
end
end
context 'when the repository access level' do
before do
allow(pool.source_project).to receive(:visibility_level).and_return(Gitlab::VisibilityLevel::PUBLIC)
end
context 'is private' do
it 'does not call fetch' do
allow(pool.source_project).to receive(:repository_access_level).and_return(ProjectFeature::PRIVATE)
expect(pool.object_pool).not_to receive(:fetch)
service.execute
end
end
context 'is greater than private' do
it 'calls fetch' do
allow(pool.source_project).to receive(:repository_access_level).and_return(ProjectFeature::PUBLIC)
expect(pool.object_pool).to receive(:fetch)
service.execute
end
end
end
end
it 'links the repository to the object pool' do
expect(project).to receive(:link_pool_repository)
service.execute
end
it 'does not link the repository to the object pool if they are not on the same storage' do
allow(project.repository).to receive(:storage).and_return('special_storage_001')
expect(project).not_to receive(:link_pool_repository)
service.execute
end
end
context 'when a lease is already out' do
before do
stub_exclusive_lease_taken(lease_key, timeout: lease_timeout)
end
it 'fails when a lease is already out' do
expect(Gitlab::AppJsonLogger).to receive(:error).with({
  message: "Cannot obtain an exclusive lease. There must be another instance already in execution.",
  lease_key: lease_key,
  class_name: described_class.name,
  lease_timeout: lease_timeout
})
service.execute
end
end
end
end
end
|
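The spec above drives `ExclusiveLeaseGuard` entirely through `ExclusiveLeaseHelpers`. A rough sketch of the two stubbing modes it uses, with an illustrative lease key:
```ruby
# Inside an example group that includes ExclusiveLeaseHelpers:
stub_exclusive_lease('git_deduplication:42', timeout: 86_400) # lease obtainable
stub_exclusive_lease_taken('git_deduplication:42')            # lease already held

# When the lease is already held, ExclusiveLeaseGuard#try_obtain_lease does not
# yield, so the service only logs the "Cannot obtain an exclusive lease" error.
```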
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class UpdateStatisticsService < BaseService
include ::Gitlab::Utils::StrongMemoize
STAT_TO_CACHED_METHOD = {
repository_size: [:size, :recent_objects_size],
commit_count: :commit_count
}.freeze
def execute
return unless project
Gitlab::AppLogger.info("Updating statistics for project #{project.id}")
expire_repository_caches
expire_wiki_caches
project.statistics.refresh!(only: statistics)
end
private
def expire_repository_caches
if statistics.empty?
project.repository.expire_statistics_caches
elsif method_caches_to_expire.present?
project.repository.expire_method_caches(method_caches_to_expire)
end
end
def expire_wiki_caches
return unless project.wiki_enabled? && statistics.include?(:wiki_size)
project.wiki.repository.expire_method_caches([:size])
end
def method_caches_to_expire
strong_memoize(:method_caches_to_expire) do
statistics.flat_map { |stat| STAT_TO_CACHED_METHOD[stat] }.compact
end
end
def statistics
strong_memoize(:statistics) do
params[:statistics]&.map(&:to_sym)
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::UpdateStatisticsService, feature_category: :groups_and_projects do
using RSpec::Parameterized::TableSyntax
let(:service) { described_class.new(project, nil, statistics: statistics) }
let(:statistics) { %w[repository_size] }
describe '#execute' do
context 'with a non-existing project' do
let(:project) { nil }
it 'does nothing' do
expect_any_instance_of(ProjectStatistics).not_to receive(:refresh!)
service.execute
end
end
context 'with an existing project' do
let_it_be(:project) { create(:project) }
where(:statistics, :method_caches) do
[] | %i[size recent_objects_size commit_count]
['repository_size'] | %i[size recent_objects_size]
[:repository_size] | %i[size recent_objects_size]
[:lfs_objects_size] | nil
[:commit_count] | [:commit_count]
[:repository_size, :commit_count] | %i[size recent_objects_size commit_count]
[:repository_size, :commit_count, :lfs_objects_size] | %i[size recent_objects_size commit_count]
end
with_them do
it 'refreshes the project statistics' do
expect(project.statistics).to receive(:refresh!).with(only: statistics.map(&:to_sym)).and_call_original
service.execute
end
it 'invalidates the method caches after a refresh' do
expect(project.wiki.repository).not_to receive(:expire_method_caches)
if method_caches.present?
expect(project.repository).to receive(:expire_method_caches).with(method_caches).and_call_original
else
expect(project.repository).not_to receive(:expire_method_caches)
end
service.execute
end
end
end
context 'with an existing project with a Wiki' do
let(:project) { create(:project, :repository, :wiki_enabled) }
let(:statistics) { [:wiki_size] }
it 'invalidates and refreshes Wiki size' do
expect(project.statistics).to receive(:refresh!).with(only: statistics).and_call_original
expect(project.wiki.repository).to receive(:expire_method_caches).with(%i[size]).and_call_original
service.execute
end
end
end
end
|
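For readers unfamiliar with `RSpec::Parameterized::TableSyntax`, here is a self-contained sketch of the `where`/`with_them` pattern the spec above relies on; the assertion itself is illustrative:
```ruby
RSpec.describe Projects::UpdateStatisticsService do
  using RSpec::Parameterized::TableSyntax

  where(:statistics, :method_caches) do
    [:commit_count]    | [:commit_count]
    [:repository_size] | %i[size recent_objects_size]
  end

  with_them do
    # Each table row becomes its own example, with `statistics` and
    # `method_caches` bound to that row's values.
    it 'maps statistics to the method caches they expire' do
      mapped = described_class::STAT_TO_CACHED_METHOD.values_at(*statistics).flatten.compact
      expect(mapped).to match_array(method_caches)
    end
  end
end
```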
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
# Base class for the various service classes that count project data (e.g.
# issues or forks).
class CountService < BaseCountService
# The version of the cache format. This should be bumped whenever the
# underlying logic changes. This removes the need for explicitly flushing
# all caches.
VERSION = 1
attr_reader :project
def initialize(project)
@project = project
end
def relation_for_count
self.class.query(@project.id)
end
def cache_key_name
raise(
NotImplementedError,
'"cache_key_name" must be implemented and return a String'
)
end
def cache_key(key = nil)
cache_key = key || cache_key_name
['projects', 'count_service', VERSION, @project.id, cache_key]
end
def self.query(project_ids)
raise(
NotImplementedError,
'"query" must be implemented and return an ActiveRecord::Relation'
)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::CountService, feature_category: :groups_and_projects do
let(:project) { build(:project, id: 1) }
let(:service) { described_class.new(project) }
describe '.query' do
it 'raises NotImplementedError' do
expect { described_class.query(project.id) }.to raise_error(NotImplementedError)
end
end
describe '#relation_for_count' do
it 'calls the class method query with the project id' do
expect(described_class).to receive(:query).with(project.id)
service.relation_for_count
end
end
describe '#count' do
before do
allow(service).to receive(:cache_key_name).and_return('count_service')
end
it 'returns the number of rows' do
allow(service).to receive(:uncached_count).and_return(1)
expect(service.count).to eq(1)
end
it 'caches the number of rows', :use_clean_rails_memory_store_caching do
expect(service).to receive(:uncached_count).once.and_return(1)
2.times do
expect(service.count).to eq(1)
end
end
end
describe '#refresh_cache', :use_clean_rails_memory_store_caching do
before do
allow(service).to receive(:cache_key_name).and_return('count_service')
end
it 'refreshes the cache' do
expect(service).to receive(:uncached_count).once.and_return(1)
service.refresh_cache
expect(service.count).to eq(1)
end
end
describe '#delete_cache', :use_clean_rails_memory_store_caching do
before do
allow(service).to receive(:cache_key_name).and_return('count_service')
end
it 'removes the cache' do
expect(service).to receive(:uncached_count).twice.and_return(1)
service.count
service.delete_cache
service.count
end
end
describe '#cache_key_name' do
it 'raises NotImplementedError' do
expect { service.cache_key_name }.to raise_error(NotImplementedError)
end
end
describe '#cache_key' do
it 'returns the cache key as an Array' do
allow(service).to receive(:cache_key_name).and_return('foo')
expect(service.cache_key).to eq(['projects', 'count_service', described_class::VERSION, 1, 'foo'])
end
end
end
|
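The two `NotImplementedError` hooks above are the entire subclass contract. A hypothetical minimal subclass, assuming a model and key name that are illustrative rather than an existing service:
```ruby
module Projects
  class ForkCountService < CountService
    # Becomes the last element of the versioned cache key array.
    def cache_key_name
      'forks_count'
    end

    # Must return an ActiveRecord::Relation; batch services reuse it too.
    def self.query(project_ids)
      ForkNetworkMember.where(forked_from_project: project_ids)
    end
  end
end
```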
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class MoveDeployKeysProjectsService < BaseMoveRelationsService
def execute(source_project, remove_remaining_elements: true)
return unless super
# The SHA256 fingerprint should be there, but just in case it isn't
# we want to make sure it's generated. Otherwise we might delete keys.
ensure_sha256_fingerprints
Project.transaction do
move_deploy_keys_projects
remove_remaining_deploy_keys_projects if remove_remaining_elements
success
end
end
private
def ensure_sha256_fingerprints
@project.deploy_keys.each(&:ensure_sha256_fingerprint!)
source_project.deploy_keys.each(&:ensure_sha256_fingerprint!)
end
def move_deploy_keys_projects
non_existent_deploy_keys_projects.update_all(project_id: @project.id)
end
# rubocop: disable CodeReuse/ActiveRecord
def non_existent_deploy_keys_projects
source_project.deploy_keys_projects
.joins(:deploy_key)
.where.not(keys: { fingerprint_sha256: @project.deploy_keys.select(:fingerprint_sha256) })
end
# rubocop: enable CodeReuse/ActiveRecord
def remove_remaining_deploy_keys_projects
source_project.deploy_keys_projects.destroy_all # rubocop: disable Cop/DestroyAll
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::MoveDeployKeysProjectsService, feature_category: :continuous_delivery do
let!(:user) { create(:user) }
let!(:project_with_deploy_keys) { create(:project, namespace: user.namespace) }
let!(:target_project) { create(:project, namespace: user.namespace) }
subject { described_class.new(target_project, user) }
describe '#execute' do
before do
create_list(:deploy_keys_project, 2, project: project_with_deploy_keys)
end
it 'moves the user\'s deploy keys from one project to another' do
expect(project_with_deploy_keys.deploy_keys_projects.count).to eq 2
expect(target_project.deploy_keys_projects.count).to eq 0
subject.execute(project_with_deploy_keys)
expect(project_with_deploy_keys.deploy_keys_projects.count).to eq 0
expect(target_project.deploy_keys_projects.count).to eq 2
end
it 'does not link existing deploy_keys in the current project' do
target_project.deploy_keys << project_with_deploy_keys.deploy_keys.first
expect(project_with_deploy_keys.deploy_keys_projects.count).to eq 2
expect(target_project.deploy_keys_projects.count).to eq 1
subject.execute(project_with_deploy_keys)
expect(project_with_deploy_keys.deploy_keys_projects.count).to eq 0
expect(target_project.deploy_keys_projects.count).to eq 2
end
it 'rolls back changes if the transaction fails' do
allow(subject).to receive(:success).and_raise(StandardError)
expect { subject.execute(project_with_deploy_keys) }.to raise_error(StandardError)
expect(project_with_deploy_keys.deploy_keys_projects.count).to eq 2
expect(target_project.deploy_keys_projects.count).to eq 0
end
context 'when remove_remaining_elements is false' do
let(:options) { { remove_remaining_elements: false } }
it 'does not remove remaining deploy keys projects' do
target_project.deploy_keys << project_with_deploy_keys.deploy_keys.first
subject.execute(project_with_deploy_keys, **options)
expect(project_with_deploy_keys.deploy_keys_projects.count).not_to eq 0
end
end
context 'when SHA256 fingerprint is missing' do
before do
create(:deploy_keys_project, project: target_project)
DeployKey.all.update_all(fingerprint_sha256: nil)
end
it 'moves the user\'s deploy keys from one project to another' do
combined_keys = project_with_deploy_keys.deploy_keys + target_project.deploy_keys
subject.execute(project_with_deploy_keys)
expect(project_with_deploy_keys.deploy_keys.reload).to be_empty
expect(target_project.deploy_keys.reload).to match_array(combined_keys)
expect(DeployKey.all.select(:fingerprint_sha256)).to all(be_present)
end
end
end
end
|
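The core of the service above is the anti-join in `non_existent_deploy_keys_projects`. The same pattern as a generic sketch, with `source` and `target` as stand-in projects:
```ruby
# Move only rows whose key fingerprint is absent from the target project:
# where.not against a subquery select compiles to a single NOT IN anti-join.
missing = source.deploy_keys_projects
  .joins(:deploy_key)
  .where.not(keys: { fingerprint_sha256: target.deploy_keys.select(:fingerprint_sha256) })

missing.update_all(project_id: target.id) # one UPDATE, no AR instantiation

# This is why ensure_sha256_fingerprints runs first: a NULL fingerprint drops
# out of the NOT IN comparison, so the row would be skipped here and then
# destroyed by the destroy_all cleanup instead of moved.
```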
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class DetectRepositoryLanguagesService < BaseService
attr_reader :programming_languages
# rubocop: disable CodeReuse/ActiveRecord
def execute
repository_languages = project.repository_languages
detection = Gitlab::LanguageDetection.new(repository, repository_languages)
matching_programming_languages = ensure_programming_languages(detection)
RepositoryLanguage.transaction do
RepositoryLanguage.where(project_id: project.id, programming_language_id: detection.deletions).delete_all
detection.updates.each do |update|
RepositoryLanguage
.where(project_id: project.id)
.where(programming_language_id: update[:programming_language_id])
.update_all(share: update[:share])
end
ApplicationRecord.legacy_bulk_insert( # rubocop:disable Gitlab/BulkInsert
RepositoryLanguage.table_name,
detection.insertions(matching_programming_languages)
)
set_detected_repository_languages
end
project.repository_languages.reset
end
# rubocop: enable CodeReuse/ActiveRecord
private
# rubocop: disable CodeReuse/ActiveRecord
def ensure_programming_languages(detection)
existing_languages = ProgrammingLanguage.where(name: detection.languages)
return existing_languages if detection.languages.size == existing_languages.size
missing_languages = detection.languages - existing_languages.map(&:name)
created_languages = missing_languages.map do |name|
create_language(name, detection.language_color(name))
end
existing_languages + created_languages
end
# rubocop: enable CodeReuse/ActiveRecord
# rubocop: disable CodeReuse/ActiveRecord
def create_language(name, color)
ProgrammingLanguage.transaction do
ProgrammingLanguage.where(name: name).first_or_create(color: color)
end
rescue ActiveRecord::RecordNotUnique
retry
end
# rubocop: enable CodeReuse/ActiveRecord
def set_detected_repository_languages
return if project.detected_repository_languages?
project.update_column(:detected_repository_languages, true)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::DetectRepositoryLanguagesService, :clean_gitlab_redis_shared_state, feature_category: :groups_and_projects do
let_it_be(:project, reload: true) { create(:project, :repository) }
subject { described_class.new(project) }
describe '#execute' do
context 'without previous detection' do
it 'inserts new programming languages in the database' do
subject.execute
expect(ProgrammingLanguage.exists?(name: 'Ruby')).to be(true)
expect(ProgrammingLanguage.count).to be(4)
end
it 'inserts the repository languages' do
names = subject.execute.map(&:name)
expect(names).to eq(%w[Ruby JavaScript HTML CoffeeScript])
end
it 'updates detected_repository_languages flag' do
expect { subject.execute }.to change(project, :detected_repository_languages).to(true)
end
end
context 'with a previous detection' do
before do
subject.execute
allow(project.repository).to receive(:languages).and_return(
[{ value: 99.63, label: "Ruby", color: "#701516", highlight: "#701516" },
{ value: 0.3, label: "D", color: "#701516", highlight: "#701516" }]
)
end
it 'updates the repository languages' do
repository_languages = subject.execute.map(&:name)
expect(repository_languages).to eq(%w[Ruby D])
end
it "doesn't touch detected_repository_languages flag" do
expect(project).not_to receive(:update_column).with(:detected_repository_languages, true)
subject.execute
end
end
context 'when no repository exists' do
let_it_be(:project) { create(:project) }
it 'has no languages' do
expect(subject.execute).to be_empty
expect(project.repository_languages).to be_empty
end
end
end
end
|
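One detail worth noting in the class above: `create_language` retries on `ActiveRecord::RecordNotUnique` so concurrent inserts of the same language converge on one row. The same pattern as a standalone sketch (`safe_first_or_create` is a hypothetical helper name):
```ruby
# first_or_create is check-then-act and can race across processes; the unique
# index on name raises RecordNotUnique for the loser, and retrying re-runs the
# lookup, which now finds the winner's row.
def safe_first_or_create(name, color)
  ProgrammingLanguage.transaction do
    ProgrammingLanguage.where(name: name).first_or_create(color: color)
  end
rescue ActiveRecord::RecordNotUnique
  retry
end
```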
Write RSpec test file for following ruby class
```ruby
# frozen_string_literal: true
module Projects
class CreateService < BaseService
include ValidatesClassificationLabel
ImportSourceDisabledError = Class.new(StandardError)
INTERNAL_IMPORT_SOURCES = %w[gitlab_custom_project_template gitlab_project_migration].freeze
def initialize(user, params)
@current_user = user
@params = params.dup
@skip_wiki = @params.delete(:skip_wiki)
@initialize_with_sast = Gitlab::Utils.to_boolean(@params.delete(:initialize_with_sast))
@initialize_with_readme = Gitlab::Utils.to_boolean(@params.delete(:initialize_with_readme))
@use_sha256_repository = Gitlab::Utils.to_boolean(@params.delete(:use_sha256_repository)) && Feature.enabled?(:support_sha256_repositories, user)
@import_data = @params.delete(:import_data)
@relations_block = @params.delete(:relations_block)
@default_branch = @params.delete(:default_branch)
@readme_template = @params.delete(:readme_template)
build_topics
end
def execute
params[:wiki_enabled] = params[:wiki_access_level] if params[:wiki_access_level]
params[:builds_enabled] = params[:builds_access_level] if params[:builds_access_level]
params[:snippets_enabled] = params[:snippets_access_level] if params[:snippets_access_level]
params[:merge_requests_enabled] = params[:merge_requests_access_level] if params[:merge_requests_access_level]
params[:issues_enabled] = params[:issues_access_level] if params[:issues_access_level]
if create_from_template?
return ::Projects::CreateFromTemplateService.new(current_user, params).execute
end
@project = Project.new(params.merge(creator: current_user))
validate_import_source_enabled!
@project.visibility_level = @project.group.visibility_level unless @project.visibility_level_allowed_by_group?
# If a project is newly created it should have shared runners settings
# based on its group having it enabled. This is like the "default value"
@project.shared_runners_enabled = false if !params.key?(:shared_runners_enabled) && @project.group && @project.group.shared_runners_setting != 'enabled'
# Make sure that the user is allowed to use the specified visibility level
if project_visibility.restricted?
deny_visibility_level(@project, project_visibility.visibility_level)
return @project
end
set_project_name_from_path
# get namespace id
namespace_id = params[:namespace_id] || current_user.namespace_id
@project.namespace_id = namespace_id.to_i
@project.check_personal_projects_limit
return @project if @project.errors.any?
validate_create_permissions
validate_import_permissions
return @project if @project.errors.any?
@relations_block&.call(@project)
yield(@project) if block_given?
validate_classification_label(@project, :external_authorization_classification_label)
# If the block added errors, don't try to save the project
return @project if @project.errors.any?
@project.creator = current_user
save_project_and_import_data
Gitlab::ApplicationContext.with_context(project: @project) do
after_create_actions if @project.persisted?
import_schedule
end
@project
rescue ActiveRecord::RecordInvalid => e
message = "Unable to save #{e.inspect}: #{e.record.errors.full_messages.join(", ")}"
fail(error: message)
rescue ImportSourceDisabledError => e
@project.errors.add(:import_source_disabled, e.message) if @project
fail(error: e.message)
rescue StandardError => e
@project.errors.add(:base, e.message) if @project
fail(error: e.message)
end
protected
def validate_create_permissions
return if current_user.can?(:create_projects, parent_namespace)
@project.errors.add(:namespace, "is not valid")
end
def validate_import_permissions
return unless @project.import?
return if current_user.can?(:import_projects, parent_namespace)
@project.errors.add(:user, 'is not allowed to import projects')
end
def after_create_actions
log_info("#{current_user.name} created a new project \"#{@project.full_name}\"")
if @project.import?
Gitlab::Tracking.event(self.class.name, 'import_project', user: current_user)
else
# Skip writing the config for project imports/forks because it
# will always fail since the Git directory doesn't exist until
# a background job creates it (see Project#add_import_job).
@project.set_full_path
end
unless @project.gitlab_project_import?
@project.create_wiki unless skip_wiki?
end
@project.track_project_repository
create_project_settings
yield if block_given?
event_service.create_project(@project, current_user)
system_hook_service.execute_hooks_for(@project, :create)
setup_authorizations
project.invalidate_personal_projects_count_of_owner
Projects::PostCreationWorker.perform_async(@project.id)
create_readme if @initialize_with_readme
create_sast_commit if @initialize_with_sast
publish_event
end
def create_project_settings
Gitlab::Pages.add_unique_domain_to(project)
@project.project_setting.save if @project.project_setting.changed?
end
# Add an authorization for the current user authorizations inline
# (so they can access the project immediately after this request
# completes), and any other affected users in the background
def setup_authorizations
if @project.group
group_access_level = @project.group.max_member_access_for_user(
current_user,
only_concrete_membership: true
)
if group_access_level > GroupMember::NO_ACCESS
current_user.project_authorizations.safe_find_or_create_by!(
project: @project,
access_level: group_access_level)
end
AuthorizedProjectUpdate::ProjectRecalculateWorker.perform_async(@project.id)
# AuthorizedProjectsWorker uses an exclusive lease per user but
# specialized workers might have synchronization issues. Until we
# compare the inconsistency rates of both approaches, we still run
# AuthorizedProjectsWorker but with some delay and lower urgency as a
# safety net.
@project.group.refresh_members_authorized_projects(
priority: UserProjectAccessChangedService::LOW_PRIORITY
)
else
owner_user = @project.namespace.owner
owner_member = @project.add_owner(owner_user, current_user: current_user)
# There is a possibility that the sidekiq job to refresh the authorizations of the owner_user in this project
# isn't picked up (or finished) by the time the user is redirected to the newly created project's page.
# If that happens, the user will hit a 404. To avoid that scenario, we manually create a `project_authorizations` record for the user here.
if owner_member.persisted?
owner_user.project_authorizations.safe_find_or_create_by(
project: @project,
access_level: ProjectMember::OWNER
)
end
# During the process of adding a project owner, a check on permissions is made on the user which caches
# the max member access for that user on this project.
# Since that is `0` before the member is created - and we are still inside the request
# cycle when we need to do other operations that might check those permissions (e.g. write a commit)
# we need to purge that cache so that the updated permissions is fetched instead of using the outdated cached value of 0
# from before member creation
@project.team.purge_member_access_cache_for_user_id(owner_user.id)
end
end
def create_readme
commit_attrs = {
branch_name: default_branch,
commit_message: 'Initial commit',
file_path: 'README.md',
file_content: readme_content
}
Files::CreateService.new(@project, current_user, commit_attrs).execute
end
def create_sast_commit
::Security::CiConfiguration::SastCreateService.new(@project, current_user, { initialize_with_sast: true }, commit_on_default: true).execute
end
def repository_object_format
@use_sha256_repository ? Repository::FORMAT_SHA256 : Repository::FORMAT_SHA1
end
def readme_content
readme_attrs = {
default_branch: default_branch
}
@readme_template.presence || ReadmeRendererService.new(@project, current_user, readme_attrs).execute
end
def skip_wiki?
[email protected]_available?(:wiki, current_user) || @skip_wiki
end
def save_project_and_import_data
Gitlab::Database::QueryAnalyzers::PreventCrossDatabaseModification.temporary_ignore_tables_in_transaction(
%w[routes redirect_routes], url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/424281'
) do
ApplicationRecord.transaction do
@project.build_or_assign_import_data(data: @import_data[:data], credentials: @import_data[:credentials]) if @import_data
# Avoid project callbacks being triggered multiple times by saving the parent first.
# See https://github.com/rails/rails/issues/41701.
Namespaces::ProjectNamespace.create_from_project!(@project) if @project.valid?
if @project.saved?
Integration.create_from_active_default_integrations(@project, :project_id)
@project.create_labels unless @project.gitlab_project_import?
next if @project.import?
unless @project.create_repository(default_branch: default_branch, object_format: repository_object_format)
raise 'Failed to create repository'
end
end
end
end
end
def fail(error:)
message = "Unable to save project. Error: #{error}"
log_message = message.dup
log_message << " Project ID: #{@project.id}" if @project&.id
Gitlab::AppLogger.error(log_message)
if @project && @project.persisted? && @project.import_state
@project.import_state.mark_as_failed(message)
end
@project
end
def set_project_name_from_path
# if both name and path are set - everything is ok
return if @project.name.present? && @project.path.present?
if @project.path.present?
# Set project name from path
@project.name = @project.path.dup
elsif @project.name.present?
# For compatibility - set path from name
@project.path = @project.name.dup
# TODO: Retained for backwards compatibility. Remove in API v5.
# When removed, validation errors will get bubbled up automatically.
# See https://gitlab.com/gitlab-org/gitlab/-/merge_requests/52725
unless @project.path.match?(Gitlab::PathRegex.project_path_format_regex)
@project.path = @project.path.parameterize
end
end
end
def extra_attributes_for_measurement
{
current_user: current_user&.name,
project_full_path: "#{parent_namespace&.full_path}/#{@params[:path]}"
}
end
private
def default_branch
@default_branch.presence || @project.default_branch_or_main
end
def validate_import_source_enabled!
return unless @params[:import_type]
import_type = @params[:import_type].to_s
return if INTERNAL_IMPORT_SOURCES.include?(import_type)
# Skip validation when creating project from a built in template
return if @params[:import_export_upload].present? && import_type == 'gitlab_project'
unless ::Gitlab::CurrentSettings.import_sources&.include?(import_type)
raise ImportSourceDisabledError, "#{import_type} import source is disabled"
end
end
def parent_namespace
@parent_namespace ||= Namespace.find_by_id(@params[:namespace_id]) || current_user.namespace
end
def create_from_template?
@params[:template_name].present? || @params[:template_project_id].present?
end
def import_schedule
if @project.errors.empty?
@project.import_state.schedule if @project.import? && [email protected]_project_migration?
else
fail(error: @project.errors.full_messages.join(', '))
end
end
def project_visibility
@project_visibility ||= Gitlab::VisibilityLevelChecker
.new(current_user, @project, project_params: { import_data: @import_data })
.level_restricted?
end
def build_topics
topics = params.delete(:topics)
tag_list = params.delete(:tag_list)
topic_list = topics || tag_list
params[:topic_list] ||= topic_list if topic_list
end
def publish_event
event = Projects::ProjectCreatedEvent.new(data: {
project_id: project.id,
namespace_id: project.namespace_id,
root_namespace_id: project.root_namespace.id
})
Gitlab::EventStore.publish(event)
end
end
end
Projects::CreateService.prepend_mod_with('Projects::CreateService')
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::CreateService, '#execute', feature_category: :groups_and_projects do
include ExternalAuthorizationServiceHelpers
let(:user) { create :user }
let(:project_name) { 'GitLab' }
let(:opts) do
{
name: project_name,
namespace_id: user.namespace.id
}
end
context 'with labels' do
subject(:project) { create_project(user, opts) }
before_all do
Label.create!(title: 'bug', template: true)
end
it 'creates labels on project creation' do
expect(project.labels).to include have_attributes(
type: eq('ProjectLabel'),
project_id: eq(project.id),
title: eq('bug')
)
end
context 'using gitlab project import' do
before do
opts[:import_type] = 'gitlab_project'
end
it 'does not create labels on project creation' do
expect(project.labels.size).to eq(0)
end
end
end
describe 'setting name and path' do
subject(:project) { create_project(user, opts) }
context 'when both are set' do
let(:opts) { { name: 'one', path: 'two' } }
it 'keeps them as specified' do
expect(project.name).to eq('one')
expect(project.path).to eq('two')
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
context 'when path is set' do
let(:opts) { { path: 'one.two_three-four' } }
it 'sets name == path' do
expect(project.path).to eq('one.two_three-four')
expect(project.name).to eq(project.path)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
context 'when name is a valid path' do
let(:opts) { { name: 'one.two_three-four' } }
it 'sets path == name' do
expect(project.name).to eq('one.two_three-four')
expect(project.path).to eq(project.name)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
context 'when name is not a valid path' do
let(:opts) { { name: 'one.two_three-four and five' } }
# TODO: Retained for backwards compatibility. Remove in API v5.
# See https://gitlab.com/gitlab-org/gitlab/-/merge_requests/52725
it 'parameterizes the name' do
expect(project.name).to eq('one.two_three-four and five')
expect(project.path).to eq('one-two_three-four-and-five')
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
describe 'topics' do
subject(:project) { create_project(user, opts) }
context "with 'topics' parameter" do
let(:opts) { { name: 'topic-project', topics: 'topics' } }
it 'keeps them as specified' do
expect(project.topic_list).to eq(%w[topics])
end
end
context "with 'topic_list' parameter" do
let(:opts) { { name: 'topic-project', topic_list: 'topic_list' } }
it 'keeps them as specified' do
expect(project.topic_list).to eq(%w[topic_list])
end
end
context "with 'tag_list' parameter (deprecated)" do
let(:opts) { { name: 'topic-project', tag_list: 'tag_list' } }
it 'keeps them as specified' do
expect(project.topic_list).to eq(%w[tag_list])
end
end
end
context 'user namespace' do
it 'creates a project in user namespace' do
project = create_project(user, opts)
expect(project).to be_valid
expect(project.first_owner).to eq(user)
expect(project.team.maintainers).not_to include(user)
expect(project.team.owners).to contain_exactly(user)
expect(project.namespace).to eq(user.namespace)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
context 'project_authorizations record creation' do
context 'when the project_authorizations records are not created via the callback' do
it 'still creates a project_authorizations record for the user' do
# stub out the callback that creates project_authorizations records on the `ProjectMember` model.
expect_next_instance_of(ProjectMember) do |member|
expect(member).to receive(:refresh_member_authorized_projects).and_return(nil)
end
project = create_project(user, opts)
expected_record = project.project_authorizations.where(
user: user,
access_level: ProjectMember::OWNER
)
expect(expected_record).to exist
end
end
end
context 'when the passed in namespace is for a bot user' do
let(:bot_user) { create(:user, :project_bot) }
let(:opts) do
{ name: project_name, namespace_id: bot_user.namespace.id }
end
it 'raises an error' do
project = create_project(bot_user, opts)
expect(project.errors.errors.length).to eq 1
expect(project.errors.messages[:namespace].first).to eq("is not valid")
end
end
end
describe 'after create actions' do
it 'invalidates personal_projects_count caches' do
expect(Rails.cache).to receive(:delete).with(['users', user.id, 'personal_projects_count'])
create_project(user, opts)
end
it 'creates associated project settings' do
project = create_project(user, opts)
expect(project.project_setting).to be_persisted
end
it_behaves_like 'storing arguments in the application context' do
let(:expected_params) { { project: subject.full_path } }
subject { create_project(user, opts) }
end
it 'logs creation' do
expect(Gitlab::AppLogger).to receive(:info).with(/#{user.name} created a new project/)
create_project(user, opts)
end
it 'publishes a ProjectCreatedEvent' do
group = create(:group, :nested).tap do |group|
group.add_owner(user)
end
expect { create_project(user, name: 'Project', path: 'project', namespace_id: group.id) }
.to publish_event(Projects::ProjectCreatedEvent)
.with(
project_id: kind_of(Numeric),
namespace_id: group.id,
root_namespace_id: group.parent_id
)
end
end
context "admin creates project with other user's namespace_id" do
context 'when admin mode is enabled', :enable_admin_mode do
it 'sets the correct permissions' do
admin = create(:admin)
project = create_project(admin, opts)
expect(project).to be_persisted
expect(project.owner).to eq(user)
expect(project.first_owner).to eq(user)
expect(project.team.owners).to contain_exactly(user)
expect(project.namespace).to eq(user.namespace)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
context 'when admin mode is disabled' do
it 'is not allowed' do
admin = create(:admin)
project = create_project(admin, opts)
expect(project).not_to be_persisted
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
context 'group namespace' do
let(:group) do
create(:group).tap do |group|
group.add_owner(user)
end
end
before do
user.refresh_authorized_projects # Ensure cache is warm
end
subject(:project) { create_project(user, opts.merge!(namespace_id: group.id)) }
shared_examples 'has sync-ed traversal_ids' do
specify { expect(project.reload.project_namespace.traversal_ids).to eq([project.namespace.traversal_ids, project.project_namespace.id].flatten.compact) }
end
it 'creates the project' do
expect(project).to be_valid
expect(project.owner).to eq(group)
expect(project.namespace).to eq(group)
expect(project.team.owners).to include(user)
expect(user.authorized_projects).to include(project)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
it_behaves_like 'has sync-ed traversal_ids'
context 'when project is an import' do
before do
stub_application_setting(import_sources: ['gitlab_project'])
end
context 'when user is not allowed to import projects' do
let(:group) do
create(:group).tap do |group|
group.add_developer(user)
end
end
it 'does not create the project' do
project = create_project(user, opts.merge!(namespace_id: group.id, import_type: 'gitlab_project'))
expect(project).not_to be_persisted
expect(project.errors.messages[:user].first).to eq('is not allowed to import projects')
end
end
end
end
context 'group sharing', :sidekiq_inline do
let_it_be(:group) { create(:group) }
let_it_be(:shared_group) { create(:group) }
let_it_be(:shared_group_user) { create(:user) }
let(:opts) do
{
name: project_name,
namespace_id: shared_group.id
}
end
before do
create(:group_group_link, shared_group: shared_group, shared_with_group: group)
shared_group.add_maintainer(shared_group_user)
group.add_developer(user)
end
it 'updates authorization' do
shared_group_project = create_project(shared_group_user, opts)
expect(
Ability.allowed?(shared_group_user, :read_project, shared_group_project)
).to be_truthy
expect(
Ability.allowed?(user, :read_project, shared_group_project)
).to be_truthy
end
end
context 'user with project limit' do
let_it_be(:user_with_projects_limit) { create(:user, projects_limit: 0) }
let(:params) { opts.merge!(namespace_id: target_namespace.id) }
subject(:project) { create_project(user_with_projects_limit, params) }
context 'under personal namespace' do
let(:target_namespace) { user_with_projects_limit.namespace }
it 'cannot create a project' do
expect(project.errors.errors.length).to eq 1
expect(project.errors.messages[:limit_reached].first).to eq(_('You cannot create projects in your personal namespace. Contact your GitLab administrator.'))
end
end
context 'under group namespace' do
let_it_be(:group) do
create(:group).tap do |group|
group.add_owner(user_with_projects_limit)
end
end
let(:target_namespace) { group }
it 'can create a project' do
expect(project).to be_valid
expect(project).to be_saved
expect(project.errors.errors.length).to eq 0
end
end
end
context 'membership overrides', :sidekiq_inline do
let_it_be(:group) { create(:group, :private) }
let_it_be(:subgroup_for_projects) { create(:group, :private, parent: group) }
let_it_be(:subgroup_for_access) { create(:group, :private, parent: group) }
let_it_be(:group_maintainer) { create(:user) }
let(:group_access_level) { Gitlab::Access::REPORTER }
let(:subgroup_access_level) { Gitlab::Access::DEVELOPER }
let(:share_max_access_level) { Gitlab::Access::MAINTAINER }
let(:opts) do
{
name: project_name,
namespace_id: subgroup_for_projects.id
}
end
before do
group.add_maintainer(group_maintainer)
create(
:group_group_link,
shared_group: subgroup_for_projects,
shared_with_group: subgroup_for_access,
group_access: share_max_access_level
)
end
context 'membership is higher from group hierarchy' do
let(:group_access_level) { Gitlab::Access::MAINTAINER }
it 'updates authorization' do
create(:group_member, access_level: subgroup_access_level, group: subgroup_for_access, user: user)
create(:group_member, access_level: group_access_level, group: group, user: user)
subgroup_project = create_project(group_maintainer, opts)
project_authorization = ProjectAuthorization.where(
project_id: subgroup_project.id,
user_id: user.id,
access_level: group_access_level)
expect(project_authorization).to exist
end
end
context 'membership is higher from group share' do
let(:subgroup_access_level) { Gitlab::Access::MAINTAINER }
context 'share max access level is not limiting' do
it 'updates authorization' do
create(:group_member, access_level: group_access_level, group: group, user: user)
create(:group_member, access_level: subgroup_access_level, group: subgroup_for_access, user: user)
subgroup_project = create_project(group_maintainer, opts)
project_authorization = ProjectAuthorization.where(
project_id: subgroup_project.id,
user_id: user.id,
access_level: subgroup_access_level)
expect(project_authorization).to exist
end
end
context 'share max access level is limiting' do
let(:share_max_access_level) { Gitlab::Access::DEVELOPER }
it 'updates authorization' do
create(:group_member, access_level: group_access_level, group: group, user: user)
create(:group_member, access_level: subgroup_access_level, group: subgroup_for_access, user: user)
subgroup_project = create_project(group_maintainer, opts)
project_authorization = ProjectAuthorization.where(
project_id: subgroup_project.id,
user_id: user.id,
access_level: share_max_access_level)
expect(project_authorization).to exist
end
end
end
end
context 'error handling' do
it 'handles invalid options' do
opts[:invalid] = 'option'
expect(create_project(user, opts)).to eq(nil)
end
end
context 'wiki_enabled creates repository directory' do
context 'wiki_enabled true creates wiki repository directory' do
it do
project = create_project(user, opts)
expect(wiki_repo(project).exists?).to be_truthy
end
end
context 'wiki_enabled false does not create wiki repository directory' do
it do
opts[:wiki_enabled] = false
project = create_project(user, opts)
expect(wiki_repo(project).exists?).to be_falsey
end
end
def wiki_repo(project)
relative_path = ProjectWiki.new(project).disk_path + '.git'
Gitlab::Git::Repository.new(project.repository_storage, relative_path, 'foobar', project.full_path)
end
end
context 'import data' do
let(:import_data) { { data: { 'test' => 'some data' } } }
let(:imported_project) { create_project(user, { name: 'test', import_url: 'http://import-url', import_data: import_data }) }
it 'does not write repository config' do
expect_next_instance_of(Project) do |project|
expect(project).not_to receive(:set_full_path)
end
imported_project
expect(imported_project.project_namespace).to be_in_sync_with_project(imported_project)
end
it 'stores import data and URL' do
expect(imported_project.import_data).to be_persisted
expect(imported_project.import_data.data).to eq(import_data[:data])
expect(imported_project.import_url).to eq('http://import-url')
end
it 'tracks for imported project' do
imported_project
expect_snowplow_event(category: described_class.name, action: 'import_project', user: user)
end
describe 'import scheduling' do
context 'when project import type is gitlab project migration' do
it 'does not schedule project import' do
opts[:import_type] = 'gitlab_project_migration'
project = create_project(user, opts)
expect(project.import_state.status).to eq('none')
end
end
end
end
context 'builds_enabled global setting' do
let(:project) { create_project(user, opts) }
subject { project.builds_enabled? }
context 'global builds_enabled false does not enable CI by default' do
before do
project.project_feature.update_attribute(:builds_access_level, ProjectFeature::DISABLED)
end
it { is_expected.to be_falsey }
end
context 'global builds_enabled true does enable CI by default' do
it { is_expected.to be_truthy }
end
end
context 'default visibility level' do
let(:group) { create(:group, :private) }
using RSpec::Parameterized::TableSyntax
where(:case_name, :group_level, :project_level) do
[
['in public group', Gitlab::VisibilityLevel::PUBLIC, Gitlab::VisibilityLevel::INTERNAL],
['in internal group', Gitlab::VisibilityLevel::INTERNAL, Gitlab::VisibilityLevel::INTERNAL],
['in private group', Gitlab::VisibilityLevel::PRIVATE, Gitlab::VisibilityLevel::PRIVATE]
]
end
with_them do
before do
stub_application_setting(default_project_visibility: Gitlab::VisibilityLevel::INTERNAL)
group.add_developer(user)
group.update!(visibility_level: group_level)
opts.merge!(
name: 'test',
namespace: group,
path: 'foo'
)
end
it 'creates project with correct visibility level', :aggregate_failures do
project = create_project(user, opts)
expect(project).to respond_to(:errors)
expect(project.errors).to be_blank
expect(project.visibility_level).to eq(project_level)
expect(project).to be_saved
expect(project).to be_valid
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
context 'restricted visibility level' do
before do
stub_application_setting(restricted_visibility_levels: [Gitlab::VisibilityLevel::PUBLIC])
end
shared_examples 'restricted visibility' do
it 'does not allow a restricted visibility level for non-admins' do
project = create_project(user, opts)
expect(project).to respond_to(:errors)
expect(project.errors.messages).to have_key(:visibility_level)
expect(project.errors.messages[:visibility_level].first).to(
match('restricted by your GitLab administrator')
)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
it 'does not allow a restricted visibility level for admins when admin mode is disabled' do
admin = create(:admin)
project = create_project(admin, opts)
expect(project.errors.any?).to be(true)
expect(project.saved?).to be_falsey
end
it 'allows a restricted visibility level for admins when admin mode is enabled', :enable_admin_mode do
admin = create(:admin)
project = create_project(admin, opts)
expect(project.errors.any?).to be(false)
expect(project.saved?).to be(true)
end
end
context 'when visibility is project based' do
before do
opts.merge!(
visibility_level: Gitlab::VisibilityLevel::PUBLIC
)
end
include_examples 'restricted visibility'
end
context 'when visibility is overridden' do
let(:visibility) { 'public' }
before do
opts.merge!(
import_data: {
data: {
override_params: {
visibility: visibility
}
}
}
)
end
include_examples 'restricted visibility'
context 'when visibility is misspelled' do
let(:visibility) { 'publik' }
it 'does not restrict project creation' do
project = create_project(user, opts)
expect(project.errors.any?).to be(false)
expect(project.saved?).to be(true)
end
end
end
end
context 'repository creation' do
it 'synchronously creates the repository' do
expect_next_instance_of(Project) do |instance|
expect(instance).to receive(:create_repository).and_return(true)
end
project = create_project(user, opts)
expect(project).to be_valid
expect(project).to be_persisted
expect(project.owner).to eq(user)
expect(project.namespace).to eq(user.namespace)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
it 'raises when repository fails to create' do
expect_next_instance_of(Project) do |instance|
expect(instance).to receive(:create_repository).and_return(false)
end
project = create_project(user, opts)
expect(project).not_to be_persisted
expect(project.errors.messages).to have_key(:base)
expect(project.errors.messages[:base].first).to match('Failed to create repository')
end
context 'when another repository already exists on disk' do
let(:opts) do
{
name: 'existing',
namespace_id: user.namespace.id
}
end
context 'with legacy storage' do
let(:raw_fake_repo) { Gitlab::Git::Repository.new('default', File.join(user.namespace.full_path, 'existing.git'), nil, nil) }
before do
stub_application_setting(hashed_storage_enabled: false)
raw_fake_repo.create_repository
end
after do
raw_fake_repo.remove
end
it 'does not allow to create a project when path matches existing repository on disk' do
project = create_project(user, opts)
expect(project).not_to be_persisted
expect(project).to respond_to(:errors)
expect(project.errors.messages).to have_key(:base)
expect(project.errors.messages[:base].first).to match('There is already a repository with that name on disk')
expect(project.project_namespace).to be_in_sync_with_project(project)
end
it 'does not allow to import project when path matches existing repository on disk' do
project = create_project(user, opts.merge({ import_url: 'https://gitlab.com/gitlab-org/gitlab-test.git' }))
expect(project).not_to be_persisted
expect(project).to respond_to(:errors)
expect(project.errors.messages).to have_key(:base)
expect(project.errors.messages[:base].first).to match('There is already a repository with that name on disk')
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
context 'with hashed storage' do
let(:hash) { '6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b' }
let(:hashed_path) { '@hashed/6b/86/6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b' }
let(:raw_fake_repo) { Gitlab::Git::Repository.new('default', "#{hashed_path}.git", nil, nil) }
before do
allow(Digest::SHA2).to receive(:hexdigest) { hash }
raw_fake_repo.create_repository
end
after do
raw_fake_repo.remove
end
it 'does not allow to create a project when path matches existing repository on disk' do
project = create_project(user, opts)
expect(project).not_to be_persisted
expect(project).to respond_to(:errors)
expect(project.errors.messages).to have_key(:base)
expect(project.errors.messages[:base].first).to match('There is already a repository with that name on disk')
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
end
context 'when readme initialization is requested' do
let(:project) { create_project(user, opts) }
before do
opts[:initialize_with_readme] = '1'
end
shared_examples 'a repo with a README.md' do
it { expect(project.repository.commit_count).to be(1) }
it { expect(project.repository.readme.name).to eql('README.md') }
it { expect(project.repository.readme.data).to include(expected_content) }
end
it_behaves_like 'a repo with a README.md' do
let(:expected_content) do
<<~MARKDOWN
cd existing_repo
git remote add origin #{project.http_url_to_repo}
git branch -M master
git push -uf origin master
MARKDOWN
end
end
context 'and a readme_template is specified' do
before do
opts[:readme_template] = "# GitLab\nThis is customized readme."
end
it_behaves_like 'a repo with a README.md' do
let(:expected_content) { "# GitLab\nThis is customized readme." }
end
end
context 'and default_branch is specified' do
before do
opts[:default_branch] = 'example_branch'
end
it 'creates the correct branch' do
expect(project.repository.branch_names).to contain_exactly('example_branch')
end
it_behaves_like 'a repo with a README.md' do
let(:expected_content) do
<<~MARKDOWN
cd existing_repo
git remote add origin #{project.http_url_to_repo}
git branch -M example_branch
git push -uf origin example_branch
MARKDOWN
end
end
end
context 'and the default branch setting is configured' do
before do
allow(Gitlab::CurrentSettings).to receive(:default_branch_name).and_return('example_branch')
end
it 'creates the correct branch' do
expect(project.repository.branch_names).to contain_exactly('example_branch')
end
it_behaves_like 'a repo with a README.md' do
let(:expected_content) do
<<~MARKDOWN
cd existing_repo
git remote add origin #{project.http_url_to_repo}
git branch -M example_branch
git push -uf origin example_branch
MARKDOWN
end
end
end
end
context 'when SAST initialization is requested' do
let(:project) { create_project(user, opts) }
before do
opts[:initialize_with_sast] = '1'
allow(Gitlab::CurrentSettings).to receive(:default_branch_name).and_return('main')
end
it 'creates a commit for SAST', :aggregate_failures do
expect(project.repository.commit_count).to be(1)
expect(project.repository.commit.message).to eq(
'Configure SAST in `.gitlab-ci.yml`, creating this file if it does not already exist'
)
end
end
context 'when SHA256 format is requested' do
let(:project) { create_project(user, opts) }
let(:opts) { super().merge(initialize_with_readme: true, use_sha256_repository: true) }
before do
allow(Gitlab::CurrentSettings).to receive(:default_branch_name).and_return('main')
end
it 'creates a repository with SHA256 commit hashes', :aggregate_failures do
expect(project.repository.commit_count).to be(1)
expect(project.commit.id.size).to eq 64
end
context 'when "support_sha256_repositories" feature flag is disabled' do
before do
stub_feature_flags(support_sha256_repositories: false)
end
it 'creates a repository with default SHA1 commit hash' do
expect(project.repository.commit_count).to be(1)
expect(project.commit.id.size).to eq 40
end
end
end
describe 'create integration for the project' do
subject(:project) { create_project(user, opts) }
context 'with an active instance-level integration' do
let!(:instance_integration) { create(:prometheus_integration, :instance, api_url: 'https://prometheus.instance.com/') }
it 'creates an integration from the instance-level integration' do
expect(project.integrations.count).to eq(1)
expect(project.integrations.first.api_url).to eq(instance_integration.api_url)
expect(project.integrations.first.inherit_from_id).to eq(instance_integration.id)
end
context 'with an active group-level integration' do
let!(:group_integration) { create(:prometheus_integration, :group, group: group, api_url: 'https://prometheus.group.com/') }
let!(:group) do
create(:group).tap do |group|
group.add_owner(user)
end
end
let(:opts) do
{
name: project_name,
namespace_id: group.id
}
end
it 'creates an integration from the group-level integration' do
expect(project.integrations.count).to eq(1)
expect(project.integrations.first.api_url).to eq(group_integration.api_url)
expect(project.integrations.first.inherit_from_id).to eq(group_integration.id)
end
context 'with an active subgroup' do
let!(:subgroup_integration) { create(:prometheus_integration, :group, group: subgroup, api_url: 'https://prometheus.subgroup.com/') }
let!(:subgroup) do
create(:group, parent: group).tap do |subgroup|
subgroup.add_owner(user)
end
end
let(:opts) do
{
name: project_name,
namespace_id: subgroup.id
}
end
it 'creates an integration from the subgroup-level integration' do
expect(project.integrations.count).to eq(1)
expect(project.integrations.first.api_url).to eq(subgroup_integration.api_url)
expect(project.integrations.first.inherit_from_id).to eq(subgroup_integration.id)
end
end
end
end
end
context 'when skip_disk_validation is used' do
it 'sets the project attribute' do
opts[:skip_disk_validation] = true
project = create_project(user, opts)
expect(project.skip_disk_validation).to be_truthy
end
end
it 'calls the passed block' do
fake_block = double('block')
opts[:relations_block] = fake_block
expect_next_instance_of(Project) do |project|
expect(fake_block).to receive(:call).with(project)
end
create_project(user, opts)
end
it 'writes project full path to gitaly' do
project = create_project(user, opts)
expect(project.repository.full_path).to eq project.full_path
end
it 'triggers PostCreationWorker' do
expect(Projects::PostCreationWorker).to receive(:perform_async).with(a_kind_of(Integer))
create_project(user, opts)
end
context 'when import source is enabled' do
before do
stub_application_setting(import_sources: ['github'])
end
it 'does not raise an error when import_source is a string' do
opts[:import_type] = 'github'
project = create_project(user, opts)
expect(project).to be_persisted
expect(project.errors).to be_blank
end
it 'does not raise an error when import_source is a symbol' do
opts[:import_type] = :github
project = create_project(user, opts)
expect(project).to be_persisted
expect(project.errors).to be_blank
end
end
context 'when import source is disabled' do
before do
stub_application_setting(import_sources: [])
opts[:import_type] = 'git'
end
it 'raises an error' do
project = create_project(user, opts)
expect(project).to respond_to(:errors)
expect(project.errors).to have_key(:import_source_disabled)
expect(project.saved?).to be_falsey
end
end
context 'with external authorization enabled' do
before do
enable_external_authorization_service_check
end
it 'does not save the project with an error if the service denies access' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(user, 'new-label', any_args) { false }
project = create_project(user, opts.merge({ external_authorization_classification_label: 'new-label' }))
expect(project.errors[:external_authorization_classification_label]).to be_present
expect(project).not_to be_persisted
end
it 'saves the project when the user has access to the label' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(user, 'new-label', any_args) { true }.at_least(1).time
project = create_project(user, opts.merge({ external_authorization_classification_label: 'new-label' }))
expect(project).to be_persisted
expect(project.external_authorization_classification_label).to eq('new-label')
end
it 'does not save the project when the user has no access to the default label and no label is provided' do
expect(::Gitlab::ExternalAuthorization)
.to receive(:access_allowed?).with(user, 'default_label', any_args) { false }
project = create_project(user, opts)
expect(project.errors[:external_authorization_classification_label]).to be_present
expect(project).not_to be_persisted
end
end
context 'with specialized project_authorization workers' do
let_it_be(:other_user) { create(:user) }
let_it_be(:group) { create(:group) }
let(:opts) do
{
name: project_name,
namespace_id: group.id
}
end
before do
group.add_maintainer(user)
group.add_developer(other_user)
end
it 'updates authorization for current_user' do
project = create_project(user, opts)
expect(
Ability.allowed?(user, :read_project, project)
).to be_truthy
end
it 'schedules authorization update for users with access to group', :sidekiq_inline do
stub_feature_flags(do_not_run_safety_net_auth_refresh_jobs: false)
expect(AuthorizedProjectsWorker).not_to(
receive(:bulk_perform_async)
)
expect(AuthorizedProjectUpdate::ProjectRecalculateWorker).to(
receive(:perform_async).and_call_original
)
expect(AuthorizedProjectUpdate::UserRefreshFromReplicaWorker).to(
receive(:bulk_perform_in).with(
1.hour,
array_including([user.id], [other_user.id]),
batch_delay: 30.seconds, batch_size: 100
).and_call_original
)
project = create_project(user, opts)
expect(
Ability.allowed?(other_user, :developer_access, project)
).to be_truthy
end
end
def create_project(user, opts)
Projects::CreateService.new(user, opts).execute
end
context 'shared Runners config' do
using RSpec::Parameterized::TableSyntax
let_it_be(:user) { create :user }
context 'when parent group is present' do
let_it_be(:group, reload: true) do
create(:group) do |group|
group.add_owner(user)
end
end
before do
group.update!(shared_runners_enabled: shared_runners_enabled,
allow_descendants_override_disabled_shared_runners: allow_to_override)
user.refresh_authorized_projects # Ensure cache is warm
end
context 'default value based on parent group setting' do
where(:shared_runners_enabled, :allow_to_override, :desired_config_for_new_project, :expected_result_for_project) do
true | false | nil | true
false | true | nil | false
false | false | nil | false
end
with_them do
it 'creates project following the parent config' do
params = opts.merge(namespace_id: group.id)
params = params.merge(shared_runners_enabled: desired_config_for_new_project) unless desired_config_for_new_project.nil?
project = create_project(user, params)
expect(project).to be_valid
expect(project.shared_runners_enabled).to eq(expected_result_for_project)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
context 'parent group is present and allows desired config' do
where(:shared_runners_enabled, :allow_to_override, :desired_config_for_new_project, :expected_result_for_project) do
true | false | true | true
true | false | false | false
false | true | false | false
false | true | true | true
false | false | false | false
end
with_them do
it 'creates project following the parent config' do
params = opts.merge(namespace_id: group.id, shared_runners_enabled: desired_config_for_new_project)
project = create_project(user, params)
expect(project).to be_valid
expect(project.shared_runners_enabled).to eq(expected_result_for_project)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
context 'parent group is present and disallows desired config' do
where(:shared_runners_enabled, :allow_to_override, :desired_config_for_new_project) do
false | false | true
end
with_them do
it 'does not create project' do
params = opts.merge(namespace_id: group.id, shared_runners_enabled: desired_config_for_new_project)
project = create_project(user, params)
expect(project.persisted?).to eq(false)
expect(project).to be_invalid
expect(project.errors[:shared_runners_enabled]).to include('cannot be enabled because parent group does not allow it')
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
end
context 'parent group is not present' do
where(:desired_config, :expected_result) do
true | true
false | false
nil | true
end
with_them do
it 'follows desired config' do
opts[:shared_runners_enabled] = desired_config unless desired_config.nil?
project = create_project(user, opts)
expect(project).to be_valid
expect(project.shared_runners_enabled).to eq(expected_result)
expect(project.project_namespace).to be_in_sync_with_project(project)
end
end
end
end
context 'when using access_level params' do
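# Helper: every feature except those explicitly exercised (and the model's
# own validation exclusions) should keep a non-disabled access level.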
def expect_not_disabled_features(project, exclude: [])
ProjectFeature::FEATURES.excluding(exclude)
.excluding(project.project_feature.send(:feature_validation_exclusion))
.each do |feature|
expect(project.project_feature.public_send(ProjectFeature.access_level_attribute(feature))).not_to eq(Featurable::DISABLED)
end
end
# repository is tested on its own below because it requires other features to be set as well
# package_registry has different behaviour and is modified from the model based on other attributes
ProjectFeature::FEATURES.excluding(:repository, :package_registry).each do |feature|
it "when using #{feature}", :aggregate_failures do
feature_attribute = ProjectFeature.access_level_attribute(feature)
opts[feature_attribute] = ProjectFeature.str_from_access_level(Featurable::DISABLED)
project = create_project(user, opts)
expect(project).to be_valid
expect(project.project_feature.public_send(feature_attribute)).to eq(Featurable::DISABLED)
expect_not_disabled_features(project, exclude: [feature])
end
end
it 'when using repository', :aggregate_failures do
# model validation will fail if builds or merge_requests have higher visibility than repository
disabled = ProjectFeature.str_from_access_level(Featurable::DISABLED)
opts[:repository_access_level] = disabled
opts[:builds_access_level] = disabled
opts[:merge_requests_access_level] = disabled
project = create_project(user, opts)
expect(project).to be_valid
expect(project.project_feature.repository_access_level).to eq(Featurable::DISABLED)
expect(project.project_feature.builds_access_level).to eq(Featurable::DISABLED)
expect(project.project_feature.merge_requests_access_level).to eq(Featurable::DISABLED)
expect_not_disabled_features(project, exclude: [:repository, :builds, :merge_requests])
end
end
it 'adds pages unique domain', feature_category: :pages do
stub_pages_setting(enabled: true)
expect(Gitlab::Pages)
.to receive(:add_unique_domain_to)
.and_call_original
project = create_project(user, opts)
expect(project.project_setting.pages_unique_domain_enabled).to eq(true)
expect(project.project_setting.pages_unique_domain).to be_present
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
# Used by project imports, it removes any potential paths
# included in an error message that could be stored in the DB
class ImportErrorFilter
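# Matches any whitespace-delimited token that contains a path separator;
# the lookahead stops the match at the next whitespace (or end of string),
# so the surrounding words of the message are preserved.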
ERROR_MESSAGE_FILTER = /[^\s]*#{File::SEPARATOR}[^\s]*(?=(\s|\z))/
FILTER_MESSAGE = '[FILTERED]'
def self.filter_message(message)
message.gsub(ERROR_MESSAGE_FILTER, FILTER_MESSAGE)
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ImportErrorFilter, feature_category: :importers do
it 'filters any full paths' do
message = 'Error importing into /my/folder Permission denied @ unlink_internal - /var/opt/gitlab/gitlab-rails/shared/a/b/c/uploads/file'
expect(described_class.filter_message(message)).to eq('Error importing into [FILTERED] Permission denied @ unlink_internal - [FILTERED]')
end
it 'filters any relative paths ignoring single slash ones' do
message = 'Error importing into my/project Permission denied @ unlink_internal - ../file/ and folder/../file'
expect(described_class.filter_message(message)).to eq('Error importing into [FILTERED] Permission denied @ unlink_internal - [FILTERED] and [FILTERED]')
end
end
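# Illustrative usage sketch (not part of the spec), assuming the class above
# is loaded:
#
#   Projects::ImportErrorFilter.filter_message('No such file /var/opt/data.txt found')
#   # => "No such file [FILTERED] found"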
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
module Alerting
class NotifyService < ::BaseProjectService
extend ::Gitlab::Utils::Override
include ::AlertManagement::AlertProcessing
include ::AlertManagement::Responses
def initialize(project, params)
super(project: project, params: params.to_h)
end
def execute(token, integration = nil)
@integration = integration
return bad_request unless valid_payload_size?
return forbidden unless active_integration?
return unauthorized unless valid_token?(token)
process_alert
return bad_request unless alert.persisted?
complete_post_processing_tasks
success(alert)
end
private
attr_reader :integration
alias_method :payload, :params
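# Guards against pathologically large or deeply nested payloads before any
# further processing (see Gitlab::Utils::DeepSize for the default limits).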
def valid_payload_size?
Gitlab::Utils::DeepSize.new(params).valid?
end
def active_integration?
integration&.active?
end
def valid_token?(token)
token == integration.token
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::Alerting::NotifyService, feature_category: :groups_and_projects do
let_it_be_with_reload(:project) { create(:project) }
let(:payload) { ActionController::Parameters.new(payload_raw).permit! }
let(:payload_raw) { {} }
let(:service) { described_class.new(project, payload) }
before do
stub_licensed_features(oncall_schedules: false, generic_alert_fingerprinting: false)
end
describe '#execute' do
include_context 'incident management settings enabled'
subject { service.execute(token, integration) }
context 'with HTTP integration' do
let_it_be_with_reload(:integration) { create(:alert_management_http_integration, project: project) }
context 'with valid token' do
let(:token) { integration.token }
context 'with valid payload' do
let_it_be(:environment) { create(:environment, project: project) }
let_it_be(:fingerprint) { 'testing' }
let_it_be(:source) { 'GitLab RSpec' }
let_it_be(:starts_at) { Time.current.change(usec: 0) }
let(:ended_at) { nil }
let(:domain) { 'operations' }
let(:payload_raw) do
{
title: 'alert title',
start_time: starts_at.rfc3339,
end_time: ended_at&.rfc3339,
severity: 'low',
monitoring_tool: source,
service: 'GitLab Test Suite',
description: 'Very detailed description',
hosts: ['1.1.1.1', '2.2.2.2'],
fingerprint: fingerprint,
gitlab_environment_name: environment.name
}.with_indifferent_access
end
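# Snapshot of the persisted alert minus volatile columns, so whole-record
# matchers stay stable across runs.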
let(:last_alert_attributes) do
AlertManagement::Alert.last.attributes
.except('id', 'iid', 'created_at', 'updated_at')
.with_indifferent_access
end
it_behaves_like 'processes new firing alert'
it_behaves_like 'properly assigns the alert properties'
include_examples 'handles race condition in alert creation'
it 'passes the integration to alert processing' do
expect(Gitlab::AlertManagement::Payload)
.to receive(:parse)
.with(project, payload.to_h, integration: integration)
.and_call_original
subject
end
context 'with partial payload' do
let_it_be(:source) { integration.name }
let_it_be(:payload_raw) do
{
title: 'alert title',
start_time: starts_at.rfc3339
}
end
include_examples 'processes never-before-seen alert'
it 'assigns the alert properties' do
subject
expect(last_alert_attributes).to match(
project_id: project.id,
title: payload_raw.fetch(:title),
started_at: Time.zone.parse(payload_raw.fetch(:start_time)),
severity: 'critical',
status: AlertManagement::Alert.status_value(:triggered),
events: 1,
hosts: [],
domain: 'operations',
payload: payload_raw.with_indifferent_access,
issue_id: nil,
description: nil,
monitoring_tool: nil,
service: nil,
fingerprint: nil,
ended_at: nil,
prometheus_alert_id: nil,
environment_id: nil
)
end
context 'with existing alert with matching payload' do
let_it_be(:fingerprint) { payload_raw.except(:start_time).stringify_keys }
let_it_be(:gitlab_fingerprint) { Gitlab::AlertManagement::Fingerprint.generate(fingerprint) }
let_it_be(:alert) { create(:alert_management_alert, project: project, fingerprint: gitlab_fingerprint) }
include_examples 'processes never-before-seen alert'
end
end
context 'with resolving payload' do
let(:ended_at) { Time.current.change(usec: 0) }
it_behaves_like 'processes recovery alert'
end
end
context 'with overlong payload' do
let(:payload_raw) { { 'the-payload-is-too-big' => true } }
before do
stub_const('::Gitlab::Utils::DeepSize::DEFAULT_MAX_DEPTH', 0)
end
it_behaves_like 'alerts service responds with an error and takes no actions', :bad_request
end
context 'with inactive integration' do
before do
integration.update!(active: false)
end
it_behaves_like 'alerts service responds with an error and takes no actions', :forbidden
end
end
context 'with invalid token' do
let(:token) { 'invalid-token' }
it_behaves_like 'alerts service responds with an error and takes no actions', :unauthorized
end
end
context 'without HTTP integration' do
let(:integration) { nil }
let(:token) { nil }
it_behaves_like 'alerts service responds with an error and takes no actions', :forbidden
end
end
end
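# Illustrative sketch (not part of the spec) of a typical invocation, assuming
# `project` and an active HTTP `integration` are in scope:
#
#   params = ActionController::Parameters.new(title: 'Service is down').permit!
#   Projects::Alerting::NotifyService
#     .new(project, params)
#     .execute(integration.token, integration)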
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
module ContainerRepository
class DeleteTagsService < BaseService
LOG_DATA_BASE = { service_class: self.to_s }.freeze
def execute(container_repository)
@container_repository = container_repository
unless container_expiration_policy?
return error('access denied') unless can?(current_user, :destroy_container_image, project)
end
@tag_names = params[:tags]
return error('no tags specified') if @tag_names.blank?
return error('repository importing') if cancel_while_importing?
delete_tags
end
private
def delete_tags
delete_service
.execute
.tap { |response| log_response(response) }
end
def delete_service
if @container_repository.client.supports_tag_delete?
::Projects::ContainerRepository::Gitlab::DeleteTagsService.new(@container_repository, @tag_names)
else
::Projects::ContainerRepository::ThirdParty::DeleteTagsService.new(@container_repository, @tag_names)
end
end
def log_response(response)
log_data = LOG_DATA_BASE.merge(
container_repository_id: @container_repository.id,
project_id: @container_repository.project_id,
message: 'deleted tags',
deleted_tags_count: response[:deleted]&.size
).compact
if response[:status] == :success
log_info(log_data)
else
log_data[:message] = response[:message]
log_error(log_data)
end
end
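# Deletion is unsafe while the repository is being migrated registry-side;
# cleanup policies are additionally deferred during the pre-import phases.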
def cancel_while_importing?
return true if @container_repository.importing?
if container_expiration_policy?
return @container_repository.pre_importing? || @container_repository.pre_import_done?
end
false
end
def container_expiration_policy?
params[:container_expiration_policy].present?
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ContainerRepository::DeleteTagsService, feature_category: :container_registry do
using RSpec::Parameterized::TableSyntax
include_context 'container repository delete tags service shared context'
let(:service) { described_class.new(project, user, params) }
let_it_be(:available_service_classes) do
[
::Projects::ContainerRepository::Gitlab::DeleteTagsService,
::Projects::ContainerRepository::ThirdParty::DeleteTagsService
]
end
shared_examples 'logging a success response' do
it 'logs an info message' do
expect(service).to receive(:log_info).with(
{
service_class: 'Projects::ContainerRepository::DeleteTagsService',
message: 'deleted tags',
container_repository_id: repository.id,
project_id: repository.project_id,
deleted_tags_count: tags.size
}
)
subject
end
end
shared_examples 'logging an error response' do |message: 'could not delete tags', extra_log: {}|
it 'logs an error message' do
log_data = {
service_class: 'Projects::ContainerRepository::DeleteTagsService',
message: message,
container_repository_id: repository.id,
project_id: repository.project_id
}
log_data.merge!(extra_log) if extra_log.any?
expect(service).to receive(:log_error).with(log_data)
subject
end
end
shared_examples 'calling the correct delete tags service' do |expected_service_class|
let(:service_response) { { status: :success, deleted: tags } }
let(:excluded_service_class) { available_service_classes.excluding(expected_service_class).first }
before do
service_double = double
expect(expected_service_class).to receive(:new).with(repository, tags).and_return(service_double)
expect(excluded_service_class).not_to receive(:new)
expect(service_double).to receive(:execute).and_return(service_response)
end
it { is_expected.to include(status: :success) }
it_behaves_like 'logging a success response'
context 'with an error service response' do
let(:service_response) { { status: :error, message: 'could not delete tags' } }
it { is_expected.to include(status: :error) }
it_behaves_like 'logging an error response'
end
end
shared_examples 'handling invalid params' do
context 'with invalid params' do
before do
expect(::Projects::ContainerRepository::Gitlab::DeleteTagsService).not_to receive(:new)
expect(::Projects::ContainerRepository::ThirdParty::DeleteTagsService).not_to receive(:new)
expect_any_instance_of(ContainerRegistry::Client).not_to receive(:delete_repository_tag_by_digest)
end
context 'when no params are specified' do
let_it_be(:params) { {} }
it { is_expected.to include(status: :error) }
end
context 'with empty tags' do
let_it_be(:tags) { [] }
it { is_expected.to include(status: :error) }
end
end
end
shared_examples 'supporting fast delete' do
context 'when the registry supports fast delete' do
before do
allow(repository.client).to receive(:supports_tag_delete?).and_return(true)
end
it_behaves_like 'calling the correct delete tags service', ::Projects::ContainerRepository::Gitlab::DeleteTagsService
it_behaves_like 'handling invalid params'
context 'with the real service' do
before do
stub_delete_reference_requests(tags)
expect_delete_tags(tags)
end
it { is_expected.to include(status: :success) }
it_behaves_like 'logging a success response'
end
context 'with a timeout error' do
before do
expect_next_instance_of(::Projects::ContainerRepository::Gitlab::DeleteTagsService) do |delete_service|
expect(delete_service).to receive(:delete_tags).and_raise(::Projects::ContainerRepository::Gitlab::DeleteTagsService::TimeoutError)
end
end
it { is_expected.to include(status: :error, message: 'error while deleting tags') }
it_behaves_like 'logging an error response', message: 'error while deleting tags', extra_log: { deleted_tags_count: 0 }
end
end
end
describe '#execute' do
let(:tags) { %w[A Ba] }
subject { service.execute(repository) }
context 'without permissions' do
it { is_expected.to include(status: :error) }
end
context 'with permissions' do
before do
project.add_developer(user)
end
it_behaves_like 'supporting fast delete'
context 'when the registry does not support fast delete' do
before do
allow(repository.client).to receive(:supports_tag_delete?).and_return(false)
end
it_behaves_like 'calling the correct delete tags service', ::Projects::ContainerRepository::ThirdParty::DeleteTagsService
it_behaves_like 'handling invalid params'
end
context 'when the repository is importing' do
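# Expected behaviour per migration phase: manual deletes are only blocked
# mid-import, while cleanup policies are also blocked during pre-import.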
where(:migration_state, :called_by_policy, :error_expected) do
'default' | false | false
'default' | true | false
'pre_importing' | false | false
'pre_importing' | true | true
'pre_import_done' | false | false
'pre_import_done' | true | true
'importing' | false | true
'importing' | true | true
'import_done' | false | false
'import_done' | true | false
'import_aborted' | false | false
'import_aborted' | true | false
'import_skipped' | false | false
'import_skipped' | true | false
end
with_them do
let(:params) { { tags: tags, container_expiration_policy: called_by_policy ? true : nil } }
before do
repository.update_columns(migration_state: migration_state, migration_import_started_at: Time.zone.now, migration_pre_import_started_at: Time.zone.now, migration_pre_import_done_at: Time.zone.now)
end
it 'returns an error response if expected' do
if error_expected
expect(subject).to include(status: :error, message: 'repository importing')
else
expect(service).to receive(:delete_tags).and_return(status: :success)
expect(subject).not_to include(status: :error)
end
end
end
end
end
context 'without user' do
let_it_be(:user) { nil }
context 'when not run by a cleanup policy' do
it { is_expected.to include(status: :error) }
end
context 'when run by a cleanup policy' do
let(:params) { { tags: tags, container_expiration_policy: true } }
it_behaves_like 'supporting fast delete'
end
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
module ContainerRepository
class CleanupTagsService < BaseContainerRepositoryService
def execute
return error('access denied') unless can_destroy?
return error('invalid regex') unless valid_regex?
cleanup_tags_service_class.new(container_repository: container_repository, current_user: current_user, params: params)
.execute
end
private
def cleanup_tags_service_class
log_data = {
container_repository_id: container_repository.id,
container_repository_path: container_repository.path,
project_id: project.id
}
if use_gitlab_service?
log_info(log_data.merge(gitlab_cleanup_tags_service: true))
::Projects::ContainerRepository::Gitlab::CleanupTagsService
else
log_info(log_data.merge(third_party_cleanup_tags_service: true))
::Projects::ContainerRepository::ThirdParty::CleanupTagsService
end
end
def use_gitlab_service?
container_repository.migrated? &&
container_repository.gitlab_api_client.supports_gitlab_api?
end
def can_destroy?
return true if container_expiration_policy
can?(current_user, :destroy_container_image, project)
end
def valid_regex?
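# UntrustedRegexp compiles through RE2 and raises RegexpError on invalid
# patterns, so compiling each param up front validates it.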
%w[name_regex_delete name_regex name_regex_keep].each do |param_name|
regex = params[param_name]
::Gitlab::UntrustedRegexp.new(regex) unless regex.blank?
end
true
rescue RegexpError => e
::Gitlab::ErrorTracking.log_exception(e, project_id: project.id)
false
end
def container_expiration_policy
params['container_expiration_policy']
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ContainerRepository::CleanupTagsService, feature_category: :container_registry do
let_it_be_with_reload(:container_repository) { create(:container_repository) }
let_it_be(:user) { container_repository.project.owner }
let(:params) { {} }
let(:extra_params) { {} }
let(:service) { described_class.new(container_repository: container_repository, current_user: user, params: params.merge(extra_params)) }
before do
stub_container_registry_config(enabled: true)
end
describe '#execute' do
subject { service.execute }
shared_examples 'returning error message' do |message|
it "returns error #{message}" do
expect(::Projects::ContainerRepository::Gitlab::CleanupTagsService).not_to receive(:new)
expect(::Projects::ContainerRepository::ThirdParty::CleanupTagsService).not_to receive(:new)
expect(service).not_to receive(:log_info)
expect(subject).to eq(status: :error, message: message)
end
end
shared_examples 'handling invalid regular expressions' do
shared_examples 'handling invalid regex' do
it_behaves_like 'returning error message', 'invalid regex'
it 'calls error tracking service' do
expect(::Gitlab::ErrorTracking).to receive(:log_exception).and_call_original
subject
end
end
context 'when name_regex_delete is invalid' do
let(:extra_params) { { 'name_regex_delete' => '*test*' } }
it_behaves_like 'handling invalid regex'
end
context 'when name_regex is invalid' do
let(:extra_params) { { 'name_regex' => '*test*' } }
it_behaves_like 'handling invalid regex'
end
context 'when name_regex_keep is invalid' do
let(:extra_params) { { 'name_regex_keep' => '*test*' } }
it_behaves_like 'handling invalid regex'
end
end
shared_examples 'handling all types of container repositories' do
shared_examples 'calling service' do |service_class, extra_log_data: {}|
let(:service_double) { instance_double(service_class.to_s) }
it "uses cleanup tags service #{service_class}" do
expect(service_class).to receive(:new).with(container_repository: container_repository, current_user: user, params: params).and_return(service_double)
expect(service_double).to receive(:execute).and_return('return value')
expect(service).to receive(:log_info)
.with(
{
container_repository_id: container_repository.id,
container_repository_path: container_repository.path,
project_id: container_repository.project.id
}.merge(extra_log_data))
expect(subject).to eq('return value')
end
end
context 'with a migrated repository' do
before do
allow(container_repository).to receive(:migrated?).and_return(true)
end
context 'supporting the gitlab api' do
before do
allow(container_repository.gitlab_api_client).to receive(:supports_gitlab_api?).and_return(true)
end
it_behaves_like 'calling service', ::Projects::ContainerRepository::Gitlab::CleanupTagsService, extra_log_data: { gitlab_cleanup_tags_service: true }
end
context 'not supporting the gitlab api' do
before do
allow(container_repository.gitlab_api_client).to receive(:supports_gitlab_api?).and_return(false)
end
it_behaves_like 'calling service', ::Projects::ContainerRepository::ThirdParty::CleanupTagsService, extra_log_data: { third_party_cleanup_tags_service: true }
end
end
context 'with a non migrated repository' do
before do
allow(container_repository).to receive(:migrated?).and_return(false)
end
it_behaves_like 'calling service', ::Projects::ContainerRepository::ThirdParty::CleanupTagsService, extra_log_data: { third_party_cleanup_tags_service: true }
end
end
context 'with valid user' do
it_behaves_like 'handling invalid regular expressions'
it_behaves_like 'handling all types of container repositories'
end
context 'for container expiration policy' do
let(:user) { nil }
let(:params) { { 'container_expiration_policy' => true } }
it_behaves_like 'handling invalid regular expressions'
it_behaves_like 'handling all types of container repositories'
end
context 'with not allowed user' do
let_it_be(:user) { create(:user) }
it_behaves_like 'returning error message', 'access denied'
end
context 'with no user' do
let(:user) { nil }
it_behaves_like 'returning error message', 'access denied'
end
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
module ContainerRepository
class DestroyService < BaseService
CLEANUP_TAGS_SERVICE_PARAMS = {
'name_regex_delete' => '.*',
'container_expiration_policy' => true, # to avoid permissions checks
'keep_latest' => false
}.freeze
def execute(container_repository, disable_timeout: true)
return error('Unauthorized access') unless can_destroy?
# Delete tags outside of the transaction to avoid hitting an idle-in-transaction timeout
if delete_tags(container_repository, disable_timeout) &&
destroy_container_repository(container_repository)
success
else
container_repository.delete_failed!
error('Deletion failed for container repository')
end
end
private
def delete_tags(container_repository, disable_timeout)
service = Projects::ContainerRepository::CleanupTagsService.new(
container_repository: container_repository,
params: CLEANUP_TAGS_SERVICE_PARAMS.merge('disable_timeout' => disable_timeout)
)
result = service.execute
return true if result[:status] == :success
log_error(error_message(container_repository, 'error in deleting tags'))
false
end
def destroy_container_repository(container_repository)
return true if container_repository.destroy
log_error(error_message(container_repository, container_repository.errors.full_messages.join('. ')))
false
end
def can_destroy?
return true if skip_permission_check?
can?(current_user, :destroy_container_image, project)
end
def error_message(container_repository, message)
"Container repository with ID: #{container_repository.id} and path: #{container_repository.path} " \
"failed with message: #{message}"
end
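# Permission checks are skipped when the service runs as part of project
# deletion, where no current_user is available.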
def skip_permission_check?
!!params[:skip_permission_check]
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ContainerRepository::DestroyService, feature_category: :container_registry do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :private) }
let_it_be(:params) { {} }
subject { described_class.new(project, user, params) }
before do
stub_container_registry_config(enabled: true)
end
shared_examples 'returning an error status with message' do |error_message|
it 'returns an error status' do
response = subject.execute(repository)
expect(response).to include(status: :error, message: error_message)
end
end
shared_examples 'executing with permissions' do
let_it_be_with_refind(:repository) { create(:container_repository, :root, project: project) }
before do
stub_container_registry_tags(repository: :any, tags: %w[latest stable])
end
it 'deletes the repository' do
expect_cleanup_tags_service_with(container_repository: repository, return_status: :success)
expect { subject.execute(repository) }.to change { ContainerRepository.count }.by(-1)
end
it 'sends disable_timeout = true as part of the params as default' do
expect_cleanup_tags_service_with(container_repository: repository, return_status: :success, disable_timeout: true)
expect { subject.execute(repository) }.to change { ContainerRepository.count }.by(-1)
end
it 'sends disable_timeout = false as part of the params if it is set to false' do
expect_cleanup_tags_service_with(container_repository: repository, return_status: :success, disable_timeout: false)
expect { subject.execute(repository, disable_timeout: false) }.to change { ContainerRepository.count }.by(-1)
end
context 'when deleting the tags fails' do
before do
expect_cleanup_tags_service_with(container_repository: repository, return_status: :error)
allow(Gitlab::AppLogger).to receive(:error).and_call_original
end
it 'sets status as delete_failed' do
subject.execute(repository)
expect(repository).to be_delete_failed
end
it 'logs the error' do
subject.execute(repository)
expect(Gitlab::AppLogger).to have_received(:error)
.with("Container repository with ID: #{repository.id} and path: #{repository.path} failed with message: error in deleting tags")
end
it_behaves_like 'returning an error status with message', 'Deletion failed for container repository'
end
context 'when destroying the repository fails' do
before do
expect_cleanup_tags_service_with(container_repository: repository, return_status: :success)
allow(repository).to receive(:destroy).and_return(false)
allow(repository.errors).to receive(:full_messages).and_return(['Error 1', 'Error 2'])
allow(Gitlab::AppLogger).to receive(:error).and_call_original
end
it 'sets status as delete_failed' do
subject.execute(repository)
expect(repository).to be_delete_failed
end
it 'logs the error' do
subject.execute(repository)
expect(Gitlab::AppLogger).to have_received(:error)
.with("Container repository with ID: #{repository.id} and path: #{repository.path} failed with message: Error 1. Error 2")
end
it_behaves_like 'returning an error status with message', 'Deletion failed for container repository'
end
end
context 'when user has access to registry' do
before do
project.add_developer(user)
end
it_behaves_like 'executing with permissions'
end
context 'when user does not have access to registry' do
let_it_be(:repository) { create(:container_repository, :root, project: project) }
it 'does not delete a repository' do
expect { subject.execute(repository) }.not_to change { ContainerRepository.count }
end
it_behaves_like 'returning an error status with message', 'Unauthorized access'
end
context 'when called during project deletion' do
let(:user) { nil }
let(:params) { { skip_permission_check: true } }
it_behaves_like 'executing with permissions'
end
context 'when there is no user' do
let(:user) { nil }
let(:repository) { create(:container_repository, :root, project: project) }
it_behaves_like 'returning an error status with message', 'Unauthorized access'
end
def expect_cleanup_tags_service_with(container_repository:, return_status:, disable_timeout: true)
delete_tags_service = instance_double(Projects::ContainerRepository::CleanupTagsService)
expect(Projects::ContainerRepository::CleanupTagsService).to receive(:new).with(
container_repository: container_repository,
params: described_class::CLEANUP_TAGS_SERVICE_PARAMS.merge('disable_timeout' => disable_timeout)
).and_return(delete_tags_service)
expect(delete_tags_service).to receive(:execute).and_return(status: return_status)
end
end
|
Write an RSpec test file for the following Ruby class
```ruby
# frozen_string_literal: true
module Projects
module ContainerRepository
module Gitlab
class DeleteTagsService
include BaseServiceUtility
include ::Gitlab::Utils::StrongMemoize
include ::Projects::ContainerRepository::Gitlab::Timeoutable
def initialize(container_repository, tag_names)
@container_repository = container_repository
@tag_names = tag_names
@deleted_tags = []
end
# Delete tags by name with a single DELETE request. This is only supported
# by the GitLab Container Registry fork. See
# https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23325 for details.
def execute
return success(deleted: []) if @tag_names.empty?
delete_tags
rescue TimeoutError, ::Faraday::Error => e
::Gitlab::ErrorTracking.track_exception(e, tags_count: @tag_names&.size, container_repository_id: @container_repository&.id)
error('error while deleting tags', nil, pass_back: { deleted: @deleted_tags, exception_class_name: e.class.name })
end
private
def delete_tags
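# Tags are deleted one at a time; the timeout budget covers the whole batch,
# so a slow registry aborts part-way and reports what was already deleted.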
start_time = Time.zone.now
@tag_names.each do |name|
raise TimeoutError if timeout?(start_time)
if @container_repository.delete_tag(name)
@deleted_tags.append(name)
end
end
@deleted_tags.any? ? success(deleted: @deleted_tags) : error("could not delete tags: #{@tag_names.join(', ')}".truncate(1000))
end
end
end
end
end
``` | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ContainerRepository::Gitlab::DeleteTagsService, feature_category: :container_registry do
include_context 'container repository delete tags service shared context'
let(:service) { described_class.new(repository, tags) }
describe '#execute' do
let(:tags) { %w[A Ba] }
subject { service.execute }
RSpec.shared_examples 'deleting tags' do
it 'deletes the tags by name' do
stub_delete_reference_requests(tags)
expect_delete_tags(tags)
is_expected.to eq(status: :success, deleted: tags)
end
end
context 'with tags to delete' do
let(:timeout) { 10 }
before do
stub_application_setting(container_registry_delete_tags_service_timeout: timeout)
end
it_behaves_like 'deleting tags'
it 'succeeds when a tag delete returns 404' do
stub_delete_reference_requests('A' => 200, 'Ba' => 404)
is_expected.to eq(status: :success, deleted: tags)
end
it 'succeeds when a tag delete returns 500' do
stub_delete_reference_requests('A' => 200, 'Ba' => 500)
is_expected.to eq(status: :success, deleted: ['A'])
end
context 'with failures' do
context 'when the delete request fails' do
before do
stub_delete_reference_requests('A' => 500, 'Ba' => 500)
end
it { is_expected.to eq(status: :error, message: "could not delete tags: #{tags.join(', ')}") }
context 'when a large list of tag deletions fails' do
let(:tags) { Array.new(135) { |i| "tag#{i}" } }
let(:container_repository) { instance_double(ContainerRepository) }
before do
allow(ContainerRepository).to receive(:find).with(repository).and_return(container_repository)
tags.each do |tag|
stub_delete_reference_requests(tag => 500)
end
allow(container_repository).to receive(:delete_tag).and_return(false)
end
it 'truncates the log message' do
expect(subject).to eq(status: :error, message: "could not delete tags: #{tags.join(', ')}".truncate(1000))
end
end
end
end
context 'with timeout' do
context 'set to a valid value' do
before do
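# First timeout check passes (tag 'A' is deleted), second check trips
# the timeout before 'Ba' is attempted.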
allow(service).to receive(:timeout?).and_return(false, true)
stub_delete_reference_requests('A' => 200)
end
it { is_expected.to eq(status: :error, message: 'error while deleting tags', deleted: ['A'], exception_class_name: Projects::ContainerRepository::Gitlab::DeleteTagsService::TimeoutError.name) }
it 'tracks the exception' do
expect(::Gitlab::ErrorTracking)
.to receive(:track_exception).with(::Projects::ContainerRepository::Gitlab::DeleteTagsService::TimeoutError, tags_count: tags.size, container_repository_id: repository.id)
subject
end
end
context 'set to 0' do
let(:timeout) { 0 }
it_behaves_like 'deleting tags'
end
context 'set to nil' do
let(:timeout) { nil }
it_behaves_like 'deleting tags'
end
end
context 'with a network error' do
before do
expect(service).to receive(:delete_tags).and_raise(::Faraday::TimeoutError)
end
it { is_expected.to eq(status: :error, message: 'error while deleting tags', deleted: [], exception_class_name: ::Faraday::TimeoutError.name) }
it 'tracks the exception' do
expect(::Gitlab::ErrorTracking)
.to receive(:track_exception).with(::Faraday::TimeoutError, tags_count: tags.size, container_repository_id: repository.id)
subject
end
end
end
context 'with empty tags' do
let_it_be(:tags) { [] }
it 'does not remove anything' do
expect_any_instance_of(ContainerRegistry::Client).not_to receive(:delete_repository_tag_by_digest)
is_expected.to eq(status: :success, deleted: [])
end
end
end
end
|