Commit 67cd2904 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 30b17460
@@ -384,7 +384,6 @@ RSpec/LeakyConstantDeclaration:
     - 'spec/mailers/notify_spec.rb'
     - 'spec/migrations/20191125114345_add_admin_mode_protected_path_spec.rb'
     - 'spec/migrations/encrypt_plaintext_attributes_on_application_settings_spec.rb'
-    - 'spec/migrations/cleanup_optimistic_locking_nulls_pt2_fixed_spec.rb'
     - 'spec/models/clusters/cluster_spec.rb'
     - 'spec/models/concerns/batch_destroy_dependent_associations_spec.rb'
     - 'spec/models/concerns/blocks_json_serialization_spec.rb'
...
@@ -52,7 +52,7 @@ export default {
    ...mapActions(['createTempEntry', 'renameEntry']),
    submitForm() {
      if (this.modalType === modalTypes.rename) {
-        if (!this.entries[this.entryName]?.deleted) {
+        if (this.entries[this.entryName] && !this.entries[this.entryName].deleted) {
          flash(
            sprintf(s__('The name "%{name}" is already taken in this directory.'), {
              name: this.entryName,
...
@@ -56,6 +56,19 @@ export const getMonthNames = abbreviated => {
export const pad = (val, len = 2) => `0${val}`.slice(-len);

+/**
+ * Returns i18n weekday names array.
+ */
+export const getWeekdayNames = () => [
+  __('Sunday'),
+  __('Monday'),
+  __('Tuesday'),
+  __('Wednesday'),
+  __('Thursday'),
+  __('Friday'),
+  __('Saturday'),
+];
+
/**
 * Given a date object returns the day of the week in English
 * @param {date} date
...
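A side note for readers wiring this up to the backend: Ruby's standard library uses the same Sunday-first ordering, so a weekday index computed against this array lines up with Ruby-side day names. A minimal sketch in plain Ruby (not GitLab code):

```ruby
require 'date'

# Date::DAYNAMES is Sunday-first, matching getWeekdayNames() above, so a
# shared weekday index (0 = Sunday) resolves to the same day name.
Date::DAYNAMES    # => ["Sunday", "Monday", ..., "Saturday"]
Date::DAYNAMES[0] # => "Sunday"
```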
<script>
+import { GlSprintf, GlLink } from '@gitlab/ui';
+import { s__, sprintf } from '~/locale';
+import { getWeekdayNames } from '~/lib/utils/datetime_utility';
+
export default {
+  components: {
+    GlSprintf,
+    GlLink,
+  },
  props: {
    initialCronInterval: {
      type: String,
@@ -9,25 +17,51 @@ export default {
  },
  data() {
    return {
+      isEditingCustom: false,
+      randomHour: this.generateRandomHour(),
+      randomWeekDayIndex: this.generateRandomWeekDayIndex(),
+      randomDay: this.generateRandomDay(),
      inputNameAttribute: 'schedule[cron]',
      cronInterval: this.initialCronInterval,
-      cronIntervalPresets: {
-        everyDay: '0 4 * * *',
-        everyWeek: '0 4 * * 0',
-        everyMonth: '0 4 1 * *',
-      },
      cronSyntaxUrl: 'https://en.wikipedia.org/wiki/Cron',
-      customInputEnabled: false,
    };
  },
  computed: {
+    cronIntervalPresets() {
+      return {
+        everyDay: `0 ${this.randomHour} * * *`,
+        everyWeek: `0 ${this.randomHour} * * ${this.randomWeekDayIndex}`,
+        everyMonth: `0 ${this.randomHour} ${this.randomDay} * *`,
+      };
+    },
    intervalIsPreset() {
      return Object.values(this.cronIntervalPresets).includes(this.cronInterval);
    },
-    // The text input is editable when there's a custom interval, or when it's
-    // a preset interval and the user clicks the 'custom' radio button
-    isEditable() {
-      return Boolean(this.customInputEnabled || !this.intervalIsPreset);
+    formattedTime() {
+      if (this.randomHour > 12) {
+        return `${this.randomHour - 12}:00pm`;
+      } else if (this.randomHour === 12) {
+        return `12:00pm`;
+      }
+      return `${this.randomHour}:00am`;
+    },
+    weekday() {
+      return getWeekdayNames()[this.randomWeekDayIndex];
+    },
+    everyDayText() {
+      return sprintf(s__(`Every day (at %{time})`), { time: this.formattedTime });
+    },
+    everyWeekText() {
+      return sprintf(s__('Every week (%{weekday} at %{time})'), {
+        weekday: this.weekday,
+        time: this.formattedTime,
+      });
+    },
+    everyMonthText() {
+      return sprintf(s__('Every month (Day %{day} at %{time})'), {
+        day: this.randomDay,
+        time: this.formattedTime,
+      });
    },
  },
  watch: {
@@ -39,14 +73,31 @@ export default {
      });
    },
  },
-  created() {
-    if (this.intervalIsPreset) {
-      this.enableCustomInput = false;
+  // If at the mounting stage the default is still an empty string, we
+  // know we are not editing an existing field, so we update it so
+  // that the default is the first radio option
+  mounted() {
+    if (this.cronInterval === '') {
+      this.cronInterval = this.cronIntervalPresets.everyDay;
    }
  },
  methods: {
+    setCustomInput(e) {
+      if (!this.isEditingCustom) {
+        this.isEditingCustom = true;
+        this.$refs.customInput.click();
+        // Because we need to manually trigger the click on the radio btn,
+        // it will add a space to update the v-model. If the user is typing
+        // and the space is added, it will feel very unintuitive, so we reset
+        // the value to the original
+        this.cronInterval = e.target.value;
+      }
+      if (this.intervalIsPreset) {
+        this.isEditingCustom = false;
+      }
+    },
    toggleCustomInput(shouldEnable) {
-      this.customInputEnabled = shouldEnable;
+      this.isEditingCustom = shouldEnable;
      if (shouldEnable) {
        // We need to change the value so other radios don't remain selected
@@ -54,30 +105,21 @@ export default {
        this.cronInterval = `${this.cronInterval} `;
      }
    },
+    generateRandomHour() {
+      return Math.floor(Math.random() * 23);
+    },
+    generateRandomWeekDayIndex() {
+      return Math.floor(Math.random() * 6);
+    },
+    generateRandomDay() {
+      return Math.floor(Math.random() * 28);
+    },
  },
};
</script>

<template>
  <div class="interval-pattern-form-group">
-    <div class="cron-preset-radio-input">
-      <input
-        id="custom"
-        :name="inputNameAttribute"
-        :value="cronInterval"
-        :checked="isEditable"
-        class="label-bold"
-        type="radio"
-        @click="toggleCustomInput(true)"
-      />
-      <label for="custom"> {{ s__('PipelineSheduleIntervalPattern|Custom') }} </label>
-      <span class="cron-syntax-link-wrap">
-        (<a :href="cronSyntaxUrl" target="_blank"> {{ __('Cron syntax') }} </a>)
-      </span>
-    </div>
    <div class="cron-preset-radio-input">
      <input
        id="every-day"
@@ -89,7 +131,9 @@ export default {
        @click="toggleCustomInput(false)"
      />
-      <label class="label-bold" for="every-day"> {{ __('Every day (at 4:00am)') }} </label>
+      <label class="label-bold" for="every-day">
+        {{ everyDayText }}
+      </label>
    </div>
    <div class="cron-preset-radio-input">
@@ -104,7 +148,7 @@ export default {
      />
      <label class="label-bold" for="every-week">
-        {{ __('Every week (Sundays at 4:00am)') }}
+        {{ everyWeekText }}
      </label>
    </div>
@@ -120,20 +164,43 @@ export default {
      />
      <label class="label-bold" for="every-month">
-        {{ __('Every month (on the 1st at 4:00am)') }}
+        {{ everyMonthText }}
      </label>
    </div>
+    <div class="cron-preset-radio-input">
+      <input
+        id="custom"
+        ref="customInput"
+        v-model="cronInterval"
+        :name="inputNameAttribute"
+        :value="cronInterval"
+        class="label-bold"
+        type="radio"
+        @click="toggleCustomInput(true)"
+      />
+      <label for="custom"> {{ s__('PipelineSheduleIntervalPattern|Custom') }} </label>
+      <gl-sprintf :message="__('(%{linkStart}Cron syntax%{linkEnd})')">
+        <template #link="{content}">
+          <gl-link :href="cronSyntaxUrl" target="_blank" class="gl-font-sm">
+            {{ content }}
+          </gl-link>
+        </template>
+      </gl-sprintf>
+    </div>
    <div class="cron-interval-input-wrapper">
      <input
        id="schedule_cron"
        v-model="cronInterval"
        :placeholder="__('Define a custom pattern with cron syntax')"
        :name="inputNameAttribute"
-        :disabled="!isEditable"
        class="form-control inline cron-interval-input"
        type="text"
        required="true"
+        @input="setCustomInput"
      />
    </div>
  </div>
...
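The component above replaces the old fixed `0 4 * * *` presets by sampling a random hour, weekday, and day of month once per page load, which spreads scheduled pipelines across the day instead of piling them all onto 4:00am. A rough Ruby analogue of the preset generation (illustrative only; the component itself uses `Math.random()` as shown, and the day is kept in cron's valid 1–28 range here):

```ruby
# Sample once per page load, then interpolate into five-field cron
# expressions (minute hour day-of-month month day-of-week).
hour    = rand(0..23) # 0 = midnight
weekday = rand(0..6)  # 0 = Sunday
day     = rand(1..28) # stay within the shortest month

presets = {
  every_day:   "0 #{hour} * * *",
  every_week:  "0 #{hour} * * #{weekday}",
  every_month: "0 #{hour} #{day} * *"
}
```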
@@ -11,9 +11,7 @@ Vue.use(Translate);
function initIntervalPatternInput() {
  const intervalPatternMount = document.getElementById('interval-pattern-input');
-  const initialCronInterval = intervalPatternMount
-    ? intervalPatternMount.dataset.initialInterval
-    : '';
+  const initialCronInterval = intervalPatternMount?.dataset?.initialInterval;

  return new Vue({
    el: intervalPatternMount,
...
@@ -21,11 +21,6 @@
  .cron-interval-input {
    margin: 10px 10px 0 0;
  }
-
-  .cron-syntax-link-wrap {
-    margin-right: 10px;
-    font-size: 12px;
-  }
}

.pipeline-schedule-table-row {
...
@@ -206,10 +206,16 @@ module Clusters
      end
    end

+    def nodes
+      with_reactive_cache do |data|
+        data[:nodes]
+      end
+    end
+
    def calculate_reactive_cache
      return unless enabled?

-      { connection_status: retrieve_connection_status }
+      { connection_status: retrieve_connection_status, nodes: retrieve_nodes }
    end

    def persisted_applications
@@ -348,32 +354,55 @@ module Clusters
      end
    end

    def retrieve_connection_status
-      kubeclient.core_client.discover
-    rescue *Gitlab::Kubernetes::Errors::CONNECTION
-      :unreachable
-    rescue *Gitlab::Kubernetes::Errors::AUTHENTICATION
-      :authentication_failure
-    rescue Kubeclient::HttpError => e
-      kubeclient_error_status(e.message)
-    rescue => e
-      Gitlab::ErrorTracking.track_exception(e, cluster_id: id)
-
-      :unknown_failure
-    else
-      :connected
-    end
-
-    # KubeClient uses the same error class
-    # For connection errors (eg. timeout) and
-    # for Kubernetes errors.
-    def kubeclient_error_status(message)
-      if message&.match?(/timed out|timeout/i)
-        :unreachable
-      else
-        :authentication_failure
+      result = ::Gitlab::Kubernetes::KubeClient.graceful_request(id) { kubeclient.core_client.discover }
+      result[:status]
+    end
+
+    def retrieve_nodes
+      result = ::Gitlab::Kubernetes::KubeClient.graceful_request(id) { kubeclient.get_nodes }
+      cluster_nodes = result[:response].to_a
+
+      result = ::Gitlab::Kubernetes::KubeClient.graceful_request(id) { kubeclient.metrics_client.get_nodes }
+      nodes_metrics = result[:response].to_a
+
+      cluster_nodes.inject([]) do |memo, node|
+        sliced_node = filter_relevant_node_attributes(node)
+
+        matched_node_metric = nodes_metrics.find { |node_metric| node_metric.metadata.name == node.metadata.name }
+
+        sliced_node_metrics = matched_node_metric ? filter_relevant_node_metrics_attributes(matched_node_metric) : {}
+
+        memo << sliced_node.merge(sliced_node_metrics)
      end
    end

+    def filter_relevant_node_attributes(node)
+      {
+        'metadata' => {
+          'name' => node.metadata.name
+        },
+        'status' => {
+          'capacity' => {
+            'cpu' => node.status.capacity.cpu,
+            'memory' => node.status.capacity.memory
+          },
+          'allocatable' => {
+            'cpu' => node.status.allocatable.cpu,
+            'memory' => node.status.allocatable.memory
+          }
+        }
+      }
+    end
+
+    def filter_relevant_node_metrics_attributes(node_metrics)
+      {
+        'usage' => {
+          'cpu' => node_metrics.usage.cpu,
+          'memory' => node_metrics.usage.memory
+        }
+      }
+    end
+
    # To keep backward compatibility with AUTO_DEVOPS_DOMAIN
    # environment variable, we need to ensure KUBE_INGRESS_BASE_DOMAIN
    # is set if AUTO_DEVOPS_DOMAIN is set on any of the following options:
...
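A note on the pattern above: the rescue chains of the old `retrieve_connection_status` move behind `Gitlab::Kubernetes::KubeClient.graceful_request`, which wraps a Kubeclient call and returns a hash instead of raising. A sketch of the contract as this diff uses it (the status symbols are the ones the old error mapping produced; the exact behavior lives in `KubeClient`):

```ruby
# Assumed contract, inferred from the calls above.
result = ::Gitlab::Kubernetes::KubeClient.graceful_request(cluster_id) do
  kubeclient.get_nodes
end

result[:status]   # e.g. :connected, :unreachable, :authentication_failure
result[:response] # the Kubeclient response on success, otherwise nil
```

Centralizing the error handling this way lets `retrieve_connection_status` and `retrieve_nodes` share one code path instead of each carrying its own rescue chain.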
@@ -7,12 +7,12 @@ class ClusterEntity < Grape::Entity
  expose :enabled
  expose :environment_scope
  expose :name
+  expose :nodes
  expose :status_name, as: :status
  expose :status_reason
-  expose :applications, using: ClusterApplicationEntity

  expose :path do |cluster|
    Clusters::ClusterPresenter.new(cluster).show_path # rubocop: disable CodeReuse/Presenter
  end
+
+  expose :applications, using: ClusterApplicationEntity
end
@@ -11,6 +11,7 @@ class ClusterSerializer < BaseSerializer
      :enabled,
      :environment_scope,
      :name,
+      :nodes,
      :path,
      :status
    ]
...
@@ -115,6 +115,10 @@ module MergeRequests
      filter_merge_requests(merge_requests).each do |merge_request|
        if branch_and_project_match?(merge_request) || @push.force_push?
          merge_request.reload_diff(current_user)
+          # Clear existing merge error if the push was directed at the
+          # source branch. Clearing the error when the target branch
+          # changes would hide the error from the user.
+          merge_request.merge_error = nil
        elsif merge_request.merge_request_diff.includes_any_commits?(push_commit_ids)
          merge_request.reload_diff(current_user)
        end
...
---
title: Use NOT VALID to enforce a not null constraint on file store columns
merge_request: 31261
author:
type: performance
---
title: Update cron job schedule to have a random time generated on page load
merge_request: 30729
author:
type: changed
---
title: Add inherit_from_id column to services table
merge_request: 31320
author:
type: other
---
title: Decode dashboard_path when creating annotations
merge_request: 31665
author:
type: fixed
---
title: Fix leaky constant issue in nulls pt2 spec
merge_request: 32058
author: Rajendra Kadam
type: fixed
---
title: Clear merge request error on push to source branch
merge_request: 32001
author:
type: fixed
# frozen_string_literal: true
class AddInheritFromToServices < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
add_column :services, :inherit_from_id, :bigint
end
end
# frozen_string_literal: true
class AddIndexInheritFromIdToServices < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
add_concurrent_index :services, :inherit_from_id
add_concurrent_foreign_key :services, :services, column: :inherit_from_id, on_delete: :nullify
end
def down
remove_foreign_key_if_exists :services, column: :inherit_from_id
remove_concurrent_index :services, :inherit_from_id
end
end
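Roughly, the helpers in `up` above translate to the following PostgreSQL steps (a simplified sketch; the real `Gitlab::Database::MigrationHelpers` add existence checks and lock-timeout retries, and generate the constraint name, which appears as `fk_868a8e7ad6` in the `structure.sql` hunk further down):

```ruby
# Simplified sketch of what add_concurrent_index and
# add_concurrent_foreign_key execute under the hood.
def up
  # Built without holding a write lock on the table; this is why the
  # migration needs disable_ddl_transaction!.
  execute <<~SQL
    CREATE INDEX CONCURRENTLY index_services_on_inherit_from_id
      ON services (inherit_from_id)
  SQL

  # Added as NOT VALID (cheap), then validated in a second step that
  # does not block concurrent writes.
  execute <<~SQL
    ALTER TABLE services
      ADD CONSTRAINT fk_868a8e7ad6
      FOREIGN KEY (inherit_from_id) REFERENCES services (id)
      ON DELETE SET NULL
      NOT VALID
  SQL
  execute 'ALTER TABLE services VALIDATE CONSTRAINT fk_868a8e7ad6'
end
```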
# frozen_string_literal: true
class FillFileStoreLfsObjects < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
update_column_in_batches(:lfs_objects, :file_store, 1) do |table, query|
query.where(table[:file_store].eq(nil))
end
end
def down
# no-op
end
end
# frozen_string_literal: true
class FillStoreUploads < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
update_column_in_batches(:uploads, :store, 1) do |table, query|
query.where(table[:store].eq(nil))
end
end
def down
# no-op
end
end
# frozen_string_literal: true
class FillFileStoreCiJobArtifacts < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
# rubocop:disable Migration/UpdateLargeTable
update_column_in_batches(:ci_job_artifacts, :file_store, 1) do |table, query|
query.where(table[:file_store].eq(nil))
end
# rubocop:enable Migration/UpdateLargeTable
end
def down
# no-op
end
end
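The three backfills above all lean on `update_column_in_batches`, which issues one bounded `UPDATE` per slice of the `id` range so no single statement locks the whole table. A simplified Rails analogue of the pattern (the real helper sizes batches dynamically):

```ruby
# Simplified analogue of update_column_in_batches(:lfs_objects, :file_store, 1).
lfs_objects = Class.new(ActiveRecord::Base) { self.table_name = 'lfs_objects' }

lfs_objects.where(file_store: nil).in_batches(of: 10_000) do |batch|
  batch.update_all(file_store: 1) # one short-lived UPDATE per batch
end
```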
# frozen_string_literal: true
class AddNotNullConstraintOnFileStoreToLfsObjects < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
add_not_null_constraint(:lfs_objects, :file_store, validate: false)
end
def down
remove_not_null_constraint(:lfs_objects, :file_store)
end
end
# frozen_string_literal: true
class AddNotNullConstraintOnStoreToUploads < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
add_not_null_constraint(:uploads, :store, validate: false)
end
def down
remove_not_null_constraint(:uploads, :store)
end
end
# frozen_string_literal: true
class AddNotNullConstraintOnFileStoreToCiJobsArtifacts < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
add_not_null_constraint(:ci_job_artifacts, :file_store, validate: false)
end
def down
remove_not_null_constraint(:ci_job_artifacts, :file_store)
end
end
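Because these constraints are added with `validate: false`, PostgreSQL creates them `NOT VALID` (visible in the `structure.sql` hunk below): new and updated rows are checked immediately, but existing rows are not scanned, so the `ALTER TABLE` only needs a brief lock. Once the backfills above have run, a follow-up migration can validate the existing rows separately — a hypothetical sketch, assuming a `validate_not_null_constraint` helper mirroring `add_not_null_constraint`:

```ruby
# Hypothetical follow-up: VALIDATE CONSTRAINT scans existing rows without
# blocking concurrent writes on the table.
class ValidateFileStoreNotNullOnLfsObjects < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  def up
    validate_not_null_constraint(:lfs_objects, :file_store)
  end

  def down
    # no-op
  end
end
```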
@@ -6058,7 +6058,8 @@ CREATE TABLE public.services (
    comment_on_event_enabled boolean DEFAULT true NOT NULL,
    template boolean DEFAULT false,
    instance boolean DEFAULT false NOT NULL,
-    comment_detail smallint
+    comment_detail smallint,
+    inherit_from_id bigint
);

CREATE SEQUENCE public.services_id_seq
@@ -7995,6 +7996,15 @@ ALTER TABLE ONLY public.chat_names
ALTER TABLE ONLY public.chat_teams
    ADD CONSTRAINT chat_teams_pkey PRIMARY KEY (id);

+ALTER TABLE public.ci_job_artifacts
+    ADD CONSTRAINT check_27f0f6dbab CHECK ((file_store IS NOT NULL)) NOT VALID;
+
+ALTER TABLE public.uploads
+    ADD CONSTRAINT check_5e9547379c CHECK ((store IS NOT NULL)) NOT VALID;
+
+ALTER TABLE public.lfs_objects
+    ADD CONSTRAINT check_eecfc5717d CHECK ((file_store IS NOT NULL)) NOT VALID;
+
ALTER TABLE ONLY public.ci_build_needs
    ADD CONSTRAINT ci_build_needs_pkey PRIMARY KEY (id);
@@ -10561,6 +10571,8 @@ CREATE INDEX index_serverless_domain_cluster_on_pages_domain_id ON public.server
CREATE INDEX index_service_desk_enabled_projects_on_id_creator_id_created_at ON public.projects USING btree (id, creator_id, created_at) WHERE (service_desk_enabled = true);

+CREATE INDEX index_services_on_inherit_from_id ON public.services USING btree (inherit_from_id);
+
CREATE INDEX index_services_on_project_id_and_type ON public.services USING btree (project_id, type);

CREATE INDEX index_services_on_template ON public.services USING btree (template);
@@ -11228,6 +11240,9 @@ ALTER TABLE ONLY public.merge_request_diffs
ALTER TABLE ONLY public.ci_pipelines
    ADD CONSTRAINT fk_86635dbd80 FOREIGN KEY (project_id) REFERENCES public.projects(id) ON DELETE CASCADE;

+ALTER TABLE ONLY public.services
+    ADD CONSTRAINT fk_868a8e7ad6 FOREIGN KEY (inherit_from_id) REFERENCES public.services(id) ON DELETE SET NULL;
+
ALTER TABLE ONLY public.geo_event_log
    ADD CONSTRAINT fk_86c84214ec FOREIGN KEY (repository_renamed_event_id) REFERENCES public.geo_repository_renamed_events(id) ON DELETE CASCADE;
@@ -13788,6 +13803,8 @@ COPY "schema_migrations" (version) FROM STDIN;
20200507221434
20200508091106
20200511092714
+20200511115430
+20200511115431
20200511121549
20200511121610
20200511121620
@@ -13795,5 +13812,11 @@ COPY "schema_migrations" (version) FROM STDIN;
20200511162057
20200511162115
20200512085150
+20200513234502
+20200513235347
+20200513235532
+20200514000009
+20200514000132
+20200514000340
\.
@@ -35,8 +35,8 @@ Follow the steps below to set up a custom hook:
   `/home/git/gitlab/file_hooks/`. For Omnibus installs the path is
   usually `/opt/gitlab/embedded/service/gitlab-rails/file_hooks`.

-   For [highly available](reference_architectures/index.md) configurations, your hook file should exist on each
-   application server.
+   For [configurations with multiple servers](reference_architectures/index.md),
+   your hook file should exist on each application server.

1. Inside the `file_hooks` directory, create a file with a name of your choice,
   without spaces or special characters.
...
@@ -79,7 +79,7 @@ GitLab stores files and blobs such as Issue attachments or LFS objects into eith
- A Storage Appliance that exposes an Object Storage-compatible API.

When using the filesystem store instead of Object Storage, you need to use network mounted filesystems
-to run GitLab when using more than one server (for example with a High Availability setup).
+to run GitLab when using more than one server.

With respect to replication and verification:
...
@@ -153,8 +153,7 @@ To configure the connection to the external read-replica database and enable Log
**Secondary** nodes use a separate PostgreSQL installation as a tracking
database to keep track of replication status and automatically recover from
potential replication issues. Omnibus automatically configures a tracking database
-when `roles ['geo_secondary_role']` is set. For high availability,
-refer to [Geo High Availability](../../reference_architectures/index.md).
+when `roles ['geo_secondary_role']` is set.
If you want to run this database external to Omnibus, please follow the instructions below.

The tracking database requires an [FDW](https://www.postgresql.org/docs/11/postgres-fdw.html)
...
@@ -30,7 +30,7 @@ GitLab 12.2 includes the following minor PostgreSQL updates:

This update will occur even if major PostgreSQL updates are disabled.

-Before [refreshing Foreign Data Wrapper during a Geo HA upgrade](https://docs.gitlab.com/omnibus/update/README.html#run-post-deployment-migrations-and-checks),
+Before [refreshing Foreign Data Wrapper during a Geo upgrade](https://docs.gitlab.com/omnibus/update/README.html#run-post-deployment-migrations-and-checks),
restart the Geo tracking database:

```shell
@@ -100,8 +100,8 @@ authentication method.
   postgresql['sql_user_password'] = '<md5_hash_of_your_password>'

   # Every node that runs Unicorn or Sidekiq needs to have the database
-   # password specified as below. If you have a high-availability setup, this
-   # must be present in all application nodes.
+   # password specified as below.
+   # This must be present in all application nodes.
   gitlab_rails['db_password'] = '<your_password_here>'
   ```
@@ -125,8 +125,8 @@ authentication method.
   postgresql['sql_user_password'] = '<md5_hash_of_your_password>'

   # Every node that runs Unicorn or Sidekiq needs to have the database
-   # password specified as below. If you have a high-availability setup, this
-   # must be present in all application nodes.
+   # password specified as below.
+   # This must be present in all application nodes.
   gitlab_rails['db_password'] = '<your_password_here>'

   # Enable Foreign Data Wrapper
...
@@ -55,8 +55,8 @@ for improvements including

The minimum recommended configuration for a Gitaly Cluster requires:

-- 1 highly available load balancer
-- 1 highly available PostgreSQL server (PostgreSQL 11 or newer)
+- 1 load balancer
+- 1 PostgreSQL server (PostgreSQL 11 or newer)
- 3 Praefect nodes
- 3 Gitaly nodes (1 primary, 2 secondary)
...
@@ -34,8 +34,8 @@ Learn how to install, configure, update, and maintain your GitLab instance.

- [Install](../install/README.md): Requirements, directory structures, and installation methods.
- [Database load balancing](database_load_balancing.md): Distribute database queries among multiple database servers. **(STARTER ONLY)**
- [Omnibus support for log forwarding](https://docs.gitlab.com/omnibus/settings/logs.html#udp-log-shipping-gitlab-enterprise-edition-only) **(STARTER ONLY)**
-- [High Availability](reference_architectures/index.md): Configure multiple servers for scaling or high availability.
-- [Installing GitLab HA on Amazon Web Services (AWS)](../install/aws/index.md): Set up GitLab High Availability on Amazon AWS.
+- [Reference architectures](reference_architectures/index.md): Add additional resources to support more users.
+- [Installing GitLab on Amazon Web Services (AWS)](../install/aws/index.md): Set up GitLab on Amazon AWS.
- [Geo](geo/replication/index.md): Replicate your GitLab instance to other geographic locations as a read-only fully operational version. **(PREMIUM ONLY)**
- [Disaster Recovery](geo/disaster_recovery/index.md): Quickly fail-over to a different site with minimal effort in a disaster situation. **(PREMIUM ONLY)**
- [Pivotal Tile](../install/pivotal/index.md): Deploy GitLab as a preconfigured appliance using Ops Manager (BOSH) for Pivotal Cloud Foundry. **(PREMIUM ONLY)**
...
@@ -114,7 +114,7 @@ prometheus['scrape_configs'] = [

NOTE: **Note:**
Prometheus and most exporters don't support authentication. We don't recommend exposing them outside the local network.

-A few configuration changes are required to allow GitLab to be monitored by an external Prometheus server. External servers are recommended for highly available deployments of GitLab with multiple nodes.
+A few configuration changes are required to allow GitLab to be monitored by an external Prometheus server. External servers are recommended for [GitLab deployments with multiple nodes](../../reference_architectures/index.md).

To use an external Prometheus server:
...
@@ -5,7 +5,7 @@ type: reference

# Object Storage

GitLab supports using an object storage service for holding numerous types of data.
-In a high availability setup, it's recommended over [NFS](high_availability/nfs.md) and
+It's recommended over NFS and
in general it's better in larger setups as object storage is
typically much more performant, reliable, and scalable.
@@ -38,7 +38,7 @@ For configuring GitLab to use Object Storage refer to the following guides:

### Other alternatives to filesystem storage

If you're working to [scale out](reference_architectures/index.md) your GitLab implementation,
-or add [fault tolerance and redundancy](high_availability/README.md) you may be
+or add fault tolerance and redundancy, you may be
looking at removing dependencies on block or network filesystems.
See the following guides and
[note that Pages requires disk storage](#gitlab-pages-requires-nfs):
...
@@ -73,6 +73,12 @@ new one, and attempting to pull a repository.

NOTE: **Note:** For Omnibus Docker, `AuthorizedKeysCommand` is set up by default in
GitLab 11.11 and later.

+NOTE: **Note:** For installations from source, the command would be located at
+`/home/git/gitlab-shell/bin/gitlab-shell-authorized-keys-check` if [the install from source](../../install/installation.md#install-gitlab-shell) instructions were followed.
+You might want to consider creating a wrapper script somewhere else, since this command needs to be
+owned by `root` and not writable by group or others. You could also consider changing the ownership of this command
+as required, but that might require temporary ownership changes during `gitlab-shell` upgrades.
+
CAUTION: **Caution:** Do not disable writes until SSH is confirmed to be working
perfectly, because the file will quickly become out-of-date.
...
@@ -659,9 +659,9 @@ via an Omnibus package or the [cloud native chart](https://docs.gitlab.com/chart

DANGER: **Danger:**
By running the built-in garbage collection command, it will cause downtime to
-the Container Registry. Running this command on an instance in an HA environment
-while one of your other instances is still writing to the Registry storage,
-will remove referenced manifests. To avoid that, make sure Registry is set to
+the Container Registry. If you run this command on an instance in an environment
+where one of your other instances is still writing to the Registry storage,
+referenced manifests will be removed. To avoid that, make sure Registry is set to
[read-only mode](#performing-garbage-collection-without-downtime) before proceeding.

Container Registry can use considerable amounts of disk space. To clear up
...
@@ -45,7 +45,7 @@ This section is for links to information elsewhere in the GitLab documentation.

- Managing Omnibus PostgreSQL versions [from the development docs](https://docs.gitlab.com/omnibus/development/managing-postgresql-versions.html)
-- [PostgreSQL scaling and HA](../high_availability/database.md)
+- [PostgreSQL scaling](../high_availability/database.md)
  - including [troubleshooting](../high_availability/database.md#troubleshooting) `gitlab-ctl repmgr-check-master` and PgBouncer errors
- [Developer database documentation](../../development/README.md#database-guides) - some of which is absolutely not for production use. Including:
...
@@ -82,6 +82,11 @@ As a general rule, a worker can be considered idempotent if:

A good example of that would be a cache expiration worker.

+NOTE: **Note:**
+A job scheduled for an idempotent worker will automatically be
+[deduplicated](#deduplication) when an unstarted job with the same
+arguments is already in the queue.
+
### Ensuring a worker is idempotent

Make sure the worker tests pass using the following shared example:
...
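For context on the deduplication note above: GitLab workers opt in by declaring themselves idempotent. A minimal sketch of the declaration (the `idempotent!` class method and the shared example it pairs with are described in this same document):

```ruby
# Sketch: a worker whose jobs can safely be deduplicated — running it
# twice with the same arguments has the same effect as running it once.
class ExpireCacheWorker
  include ApplicationWorker

  idempotent!

  def perform(project_id)
    Rails.cache.delete(['project-meta', project_id])
  end
end
```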
@@ -14,5 +14,5 @@ This is a partial list of the [RSpec metadata](https://relishapp.com/rspec/rspec
| `:reliable` | The test has been [promoted to a reliable test](https://about.gitlab.com/handbook/engineering/quality/guidelines/reliable-tests/#promoting-an-existing-test-to-reliable) meaning it passes consistently in all pipelines, including merge requests. |
| `:requires_admin` | The test requires an admin account. Tests with the tag are excluded when run against Canary and Production environments. |
| `:runner` | The test depends on and will set up a GitLab Runner instance, typically to run a pipeline. |
-| `:gitaly_ha` | The test will run against a GitLab instance where repositories are stored on redundant Gitaly nodes behind a Praefect node. All nodes are separate containers in a minimal version of [our recommended highly available configuration](../../../administration/gitaly/praefect.md#requirements-for-configuring-a-gitaly-cluster). Tests that use this tag have a longer setup time since there are three additional containers that need to be started. |
+| `:gitaly_ha` | The test will run against a GitLab instance where repositories are stored on redundant Gitaly nodes behind a Praefect node. All nodes are [separate containers](../../../administration/gitaly/praefect.md#requirements-for-configuring-a-gitaly-cluster). Tests that use this tag have a longer setup time since there are three additional containers that need to be started. |
| `:skip_live_env` | The test will be excluded when run against live deployed environments such as Staging, Canary, and Production. |
@@ -11,7 +11,7 @@ The following graph explains machine boundaries in a scalable GitLab installatio

```mermaid
graph TB
  subgraph "load balancers"
-    LB(HA Proxy)
+    LB(Proxy)
  end

  subgraph "Shared storage"
...
@@ -4,7 +4,7 @@ type: howto

# Installing GitLab HA on Amazon Web Services (AWS)

-This page offers a walkthrough of a common HA (Highly Available) configuration
+This page offers a walkthrough of a common configuration
for GitLab on AWS. You should customize it to accommodate your needs.

NOTE: **Note**
@@ -13,9 +13,8 @@ For organizations with 300 users or less, the recommended AWS installation metho

## Introduction

GitLab on AWS can leverage many of the services that are already
-configurable with GitLab High Availability (HA). These services offer a great deal of
-flexibility and can be adapted to the needs of most companies, while enabling the
-automation of both vertical and horizontal scaling.
+configurable. These services offer a great deal of
+flexibility and can be adapted to the needs of most companies.

In this guide, we'll go through a basic HA setup where we'll start by
configuring our Virtual Private Cloud and subnets to later integrate
@@ -54,11 +53,10 @@ Here's a list of the AWS services we will use, with links to pricing information
  [Amazon S3 pricing](https://aws.amazon.com/s3/pricing/).
- **ELB**: A Classic Load Balancer will be used to route requests to the
  GitLab instances. See the [Amazon ELB pricing](https://aws.amazon.com/elasticloadbalancing/pricing/).
-- **RDS**: An Amazon Relational Database Service using PostgreSQL will be used
-  to provide a High Availability database configuration. See the
+- **RDS**: An Amazon Relational Database Service using PostgreSQL will be used. See the
  [Amazon RDS pricing](https://aws.amazon.com/rds/postgresql/pricing/).
- **ElastiCache**: An in-memory cache environment will be used to provide a
-  High Availability Redis configuration. See the
+  Redis configuration. See the
  [Amazon ElastiCache pricing](https://aws.amazon.com/elasticache/pricing/).

NOTE: **Note:** Please note that while we will be using EBS for storage, we do not recommend using EFS as it may negatively impact GitLab's performance. You can review the [relevant documentation](../../administration/high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
@@ -311,7 +309,7 @@ Now, it's time to create the database:
1. Select **Provisioned IOPS (SSD)** from the storage type dropdown menu. Provisioned IOPS (SSD) storage is best suited for HA (though you can choose General Purpose (SSD) to reduce the costs). Read more about it at [Storage for Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html).
1. Allocate storage and set provisioned IOPS. We'll use the minimum values, `100` and `1000`, respectively.
1. Enable storage autoscaling (optional) and set a maximum storage threshold.
-1. Under **Availability & durability**, select **Create a standby instance** to have a standby RDS instance provisioned in a different Availability Zone. Read more at [High Availability (Multi-AZ)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html).
+1. Under **Availability & durability**, select **Create a standby instance** to have a standby RDS instance provisioned in a different [Availability Zone](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html).
1. Under **Connectivity**, configure the following:
   1. Select the VPC we created earlier (`gitlab-vpc`) from the **Virtual Private Cloud (VPC)** dropdown menu.
   1. Expand the **Additional connectivity configuration** section and select the subnet group (`gitlab-rds-group`) we created earlier.
@@ -589,7 +587,7 @@ and the repository exists.

#### Configure host keys

-Ordinarily we would manually copy the contents (primary and public keys) of `/etc/ssh/` on the primary application server to `/etc/ssh` on all secondary servers. This prevents false man-in-the-middle-attack alerts when accessing servers in your High Availability cluster behind a load balancer.
+Ordinarily we would manually copy the contents (primary and public keys) of `/etc/ssh/` on the primary application server to `/etc/ssh` on all secondary servers. This prevents false man-in-the-middle-attack alerts when accessing servers in your cluster behind a load balancer.

We'll automate this by creating static host keys as part of our custom AMI. As these host keys are also rotated every time an EC2 instance boots up, "hard coding" them into our custom AMI serves as a handy workaround.
@@ -770,7 +768,7 @@ After a few minutes, the new version should be up and running.

In this guide, we went mostly through scaling and some redundancy options;
your mileage may vary.

-Keep in mind that all Highly Available solutions come with a trade-off between
+Keep in mind that all solutions come with a trade-off between
cost/complexity and uptime. The more uptime you want, the more complex the solution.
And the more complex the solution, the more work is involved in setting up and
maintaining it.
@@ -780,7 +778,7 @@ Have a read through these other resources and feel free to
to request additional material:

- [Scaling GitLab](../../administration/reference_architectures/index.md):
-  GitLab supports several different types of clustering and high-availability.
+  GitLab supports several different types of clustering.
- [Geo replication](../../administration/geo/replication/index.md):
  Geo is the solution for widely distributed development teams.
- [Omnibus GitLab](https://docs.gitlab.com/omnibus/) - Everything you need to know
...
@@ -275,8 +275,7 @@ You must define environment-scoped variables for `POSTGRES_ENABLED` and

1. Disable the built-in PostgreSQL installation for the required environments using
   scoped [environment variables](../../ci/environments.md#scoping-environments-with-specs).
   For this use case, it's likely that only `production` will need to be added to this
-   list. The built-in PostgreSQL setup for Review Apps and staging is sufficient,
-   because a high availability setup is not required.
+   list. The built-in PostgreSQL setup for Review Apps and staging is sufficient.

   ![Auto Metrics](img/disable_postgres.png)
...
@@ -20,10 +20,19 @@ You can style a message's content using the `a` and `br` HTML tags. The `br` tag

## Banners

-Banners are shown on the top of a page.
+Banners are shown on the top of a page and in Git remote responses.

![Broadcast Message Banner](img/broadcast_messages_banner_v12_10.png)

+```bash
+$ git push
+...
+remote:
+remote: **Welcome** to GitLab :wave:
+remote:
+...
+```
+
## Notifications

Notifications are shown on the bottom right of a page and can contain placeholders. A placeholder is replaced with an attribute of the active user. Placeholders must be surrounded by curly braces, for example `{{name}}`.
...
@@ -8,6 +8,8 @@ type: reference

An admin can enforce acceptance of a terms of service and privacy policy. When this option is enabled, new and existing users must accept the terms.

+If configured, the Terms of Service page can be viewed via `https://your-instance.com/-/users/terms` at any time.
+
## Configuration

To enforce acceptance of a Terms of Service and Privacy Policy:
@@ -21,7 +23,7 @@ To enforce acceptance of a Terms of Service and Privacy Policy:
1. Click **Save changes**.
1. When you are presented with the **Terms of Service** statement, click **Accept terms**.

-![Enable enforcing Terms of Service](img/enforce_terms.png).
+![Enable enforcing Terms of Service](img/enforce_terms.png)

For each update to the terms, a new version is stored. When a user accepts or declines the terms,
GitLab will record which version they accepted or declined.
...
@@ -86,9 +86,6 @@ with this approach, however, and there is a

> Introduced in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.8.

-CAUTION: **Warning:**
-This feature is currently [Alpha](https://about.gitlab.com/handbook/product/#alpha-beta-ga) and while you can start using it, it may receive important changes in the future.
-
Each security vulnerability in the merge request report or the
[Security Dashboard](security_dashboard/index.md) is actionable. Click an entry to view detailed
information with several options:
...
@@ -4,11 +4,7 @@ type: reference, howto

# Standalone Vulnerability pages

-> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/13561) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 12.10.
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/13561) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 13.0.

-CAUTION: **Warning:**
-This feature is currently [Alpha](https://about.gitlab.com/handbook/product/#alpha-beta-ga).
-You can begin using it, but it may receive important changes in the future.
-
Each security vulnerability in the [Vulnerability List](../dependency_list/index.md) has its own standalone
page.
...
@@ -933,8 +933,8 @@ vault:
  installed: true
```

-By default you will get a basic Vault setup with no high availability nor any scalable
-storage backend. This is enough for simple testing and small scale deployments, though has limits
+By default you will get a basic Vault setup with no scalable
+storage backend. This is enough for simple testing and small-scale deployments, though has limits
to how much it can scale, and as it is a single instance deployment, you will experience downtime
when upgrading the Vault application.
...
@@ -55,6 +55,11 @@ The default syntax theme is White, and you can choose among 5 different themes:

![Profile preferences syntax highlighting themes](img/profile-preferences-syntax-themes.png)

+[Introduced](https://gitlab.com/groups/gitlab-org/-/epics/2389) in 13.0, the theme
+you choose also applies to the [Web IDE](../project/web_ide/index.md)'s code editor and [Snippets](../snippets.md).
+The themes are available only in the Web IDE file editor, except for the [dark theme](https://gitlab.com/gitlab-org/gitlab/-/issues/209808),
+which applies to the entire Web IDE screen.
+
## Behavior

The following settings allow you to customize the behavior of GitLab's layout
...
@@ -64,6 +64,15 @@ list.

![Merge request diff file navigation](img/merge_request_diff_file_navigation.png)

+### Merge requests commit navigation
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/18140) in GitLab 13.0.
+
+To seamlessly navigate among commits in a merge request, from the **Commits** tab, click one of
+the commits to open the single-commit view. From there, you can navigate among the commits
+by clicking the **Prev** and **Next** buttons on the top-right of the page or by using the
+<kbd>X</kbd> and <kbd>C</kbd> keyboard shortcuts.
+
### Incrementally expand merge request diffs

By default, the diff shows only the parts of a file which are changed.
...
...@@ -102,12 +102,15 @@ The physical location of the asset can change at any time and the direct link wi ...@@ -102,12 +102,15 @@ The physical location of the asset can change at any time and the direct link wi
### Releases associated with milestones ### Releases associated with milestones
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/29020) in GitLab 12.5. > - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/29020) in GitLab 12.5.
> - [Updated](https://gitlab.com/gitlab-org/gitlab/-/issues/39467) to edit milestones in the UI in GitLab 13.0.
Releases can optionally be associated with one or more Releases can optionally be associated with one or more
[project milestones](../milestones/index.md#project-milestones-and-group-milestones) [project milestones](../milestones/index.md#project-milestones-and-group-milestones)
by including a `milestones` array in your requests to the by including a `milestones` array in your requests to the
[Releases API](../../../api/releases/index.md#create-a-release). [Releases API](../../../api/releases/index.md#create-a-release) or by using the dropdown in the [Edit Release](#editing-a-release) page.
![Release edit page with milestones dropdown expanded](img/release_milestone_dropdown_v13_0.png)
Releases display this association with the **Milestone** indicator in the top Releases display this association with the **Milestone** indicator in the top
section of the Release block on the **Project overview > Releases** page, along section of the Release block on the **Project overview > Releases** page, along
...@@ -190,11 +193,10 @@ the edit button (pencil icon) in the top-right corner of the release you want to ...@@ -190,11 +193,10 @@ the edit button (pencil icon) in the top-right corner of the release you want to
This will bring you to the **Edit Release** page, from which you can This will bring you to the **Edit Release** page, from which you can
change some of the release's details. change some of the release's details.
![Edit release page](img/edit_release_page_v12_10.png) ![Edit release page](img/edit_release_page_v13_0.png)
Currently, it is only possible to edit the release title, notes, and asset Currently, it is only possible to edit the release title, notes, associated milestones, and asset
links. To change other release information, such as its tag, associated links. To change other release information, such as its tag or release date, use the [Releases
milestones, or release date, use the [Releases
API](../../../api/releases/index.md#update-a-release). Editing this information API](../../../api/releases/index.md#update-a-release). Editing this information
through the **Edit Release** page is planned for a future version of GitLab. through the **Edit Release** page is planned for a future version of GitLab.
......
...@@ -70,8 +70,8 @@ The Status Page landing page shows you an overview of the recent incidents. Clic ...@@ -70,8 +70,8 @@ The Status Page landing page shows you an overview of the recent incidents. Clic
The incident detail page shows detailed information about a particular incident. For example: The incident detail page shows detailed information about a particular incident. For example:
- Status on the incident, including when the incident was last updated. - Status on the incident, including when the incident was last updated.
- The incident title. - The incident title, including any emojis.
- The description of the incident. - The description of the incident, including emojis and static images.
- A chronologically ordered list of updates to the incident. - A chronologically ordered list of updates to the incident.
![Status Page detail](../img/status_page_detail_v12_10.png) ![Status Page detail](../img/status_page_detail_v12_10.png)
......
...@@ -43,6 +43,20 @@ you can find a more complete list of supported languages in the ...@@ -43,6 +43,20 @@ you can find a more complete list of supported languages in the
NOTE: **Note:** NOTE: **Note:**
Single file editing is based on the [Ace Editor](https://ace.c9.io). Single file editing is based on the [Ace Editor](https://ace.c9.io).
### Themes
> [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/2389) in GitLab 13.0.
All the themes GitLab supports for syntax highlighting are added to the Web IDE's code editor.
You can pick a theme from your [profile preferences](../../profile/preferences.md).
The themes are available only in the Web IDE file editor, except for the [dark theme](https://gitlab.com/gitlab-org/gitlab/-/issues/209808),
which applies to the entire Web IDE screen.
| Solarized Light Theme | Dark Theme |
|---------------------------------------------------------------|-----------------------------------------|
| ![Solarized Light Theme](img/solarized_light_theme_v13.0.png) | ![Dark Theme](img/dark_theme_v13.0.png) |
## Commit changes ## Commit changes
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4539) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.4 and [brought to GitLab Core](https://gitlab.com/gitlab-org/gitlab-foss/issues/44157) in 10.7. > - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4539) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.4 and [brought to GitLab Core](https://gitlab.com/gitlab-org/gitlab-foss/issues/44157) in 10.7.
......
...@@ -8,16 +8,6 @@ module API ...@@ -8,16 +8,6 @@ module API
success Entities::Metrics::Dashboard::Annotation success Entities::Metrics::Dashboard::Annotation
end end
params do
requires :starting_at, type: DateTime,
desc: 'Date time indicating starting moment to which the annotation relates.'
optional :ending_at, type: DateTime,
desc: 'Date time indicating ending moment to which the annotation relates.'
requires :dashboard_path, type: String,
desc: 'The path to a file defining the dashboard on which the annotation should be added'
requires :description, type: String, desc: 'The description of the annotation'
end
ANNOTATIONS_SOURCES = [ ANNOTATIONS_SOURCES = [
{ class: ::Environment, resource: :environments, create_service_param_key: :environment }, { class: ::Environment, resource: :environments, create_service_param_key: :environment },
{ class: Clusters::Cluster, resource: :clusters, create_service_param_key: :cluster } { class: Clusters::Cluster, resource: :clusters, create_service_param_key: :cluster }
...@@ -25,6 +15,16 @@ module API ...@@ -25,6 +15,16 @@ module API
ANNOTATIONS_SOURCES.each do |annotations_source| ANNOTATIONS_SOURCES.each do |annotations_source|
resource annotations_source[:resource] do resource annotations_source[:resource] do
params do
requires :starting_at, type: DateTime,
desc: 'Date time indicating starting moment to which the annotation relates.'
optional :ending_at, type: DateTime,
desc: 'Date time indicating ending moment to which the annotation relates.'
requires :dashboard_path, type: String, coerce_with: -> (val) { CGI.unescape(val) },
desc: 'The path to a file defining the dashboard on which the annotation should be added'
requires :description, type: String, desc: 'The description of the annotation'
end
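For illustration, the `coerce_with` lambda above decodes percent-encoded characters before Grape validates the parameter; the sample paths mirror the ones exercised in the request specs further down:

require 'cgi'

# Escaped input is decoded before validation...
CGI.unescape('config/prometheus/common_metrics%26copy.yml')
# => "config/prometheus/common_metrics&copy.yml"

# ...while already-unescaped input passes through unchanged.
CGI.unescape('config/prometheus/common_metrics&copy.yml')
# => "config/prometheus/common_metrics&copy.yml"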
post ':id/metrics_dashboard/annotations' do post ':id/metrics_dashboard/annotations' do
annotations_source_object = annotations_source[:class].find(params[:id]) annotations_source_object = annotations_source[:class].find(params[:id])
......
...@@ -261,8 +261,9 @@ excluded_attributes: ...@@ -261,8 +261,9 @@ excluded_attributes:
- :token - :token
- :token_encrypted - :token_encrypted
services: services:
- :template - :inherit_from_id
- :instance - :instance
- :template
error_tracking_setting: error_tracking_setting:
- :encrypted_token - :encrypted_token
- :encrypted_token_iv - :encrypted_token_iv
......
...@@ -20,6 +20,7 @@ module Gitlab ...@@ -20,6 +20,7 @@ module Gitlab
extensions: { group: 'apis/extensions', version: 'v1beta1' }, extensions: { group: 'apis/extensions', version: 'v1beta1' },
istio: { group: 'apis/networking.istio.io', version: 'v1alpha3' }, istio: { group: 'apis/networking.istio.io', version: 'v1alpha3' },
knative: { group: 'apis/serving.knative.dev', version: 'v1alpha1' }, knative: { group: 'apis/serving.knative.dev', version: 'v1alpha1' },
metrics: { group: 'apis/metrics.k8s.io', version: 'v1beta1' },
networking: { group: 'apis/networking.k8s.io', version: 'v1' } networking: { group: 'apis/networking.k8s.io', version: 'v1' }
}.freeze }.freeze
...@@ -34,7 +35,8 @@ module Gitlab ...@@ -34,7 +35,8 @@ module Gitlab
end end
# Core API methods delegates to the core api group client # Core API methods delegates to the core api group client
delegate :get_pods, delegate :get_nodes,
:get_pods,
:get_secrets, :get_secrets,
:get_config_map, :get_config_map,
:get_namespace, :get_namespace,
...@@ -102,6 +104,31 @@ module Gitlab ...@@ -102,6 +104,31 @@ module Gitlab
} }
}.freeze }.freeze
def self.graceful_request(cluster_id)
{ status: :connected, response: yield }
rescue *Gitlab::Kubernetes::Errors::CONNECTION
{ status: :unreachable }
rescue *Gitlab::Kubernetes::Errors::AUTHENTICATION
{ status: :authentication_failure }
rescue Kubeclient::HttpError => e
{ status: kubeclient_error_status(e.message) }
rescue => e
Gitlab::ErrorTracking.track_exception(e, cluster_id: cluster_id)
{ status: :unknown_failure }
end
# Kubeclient uses the same error class
# for connection errors (e.g. timeout) and
# for Kubernetes errors.
def self.kubeclient_error_status(message)
if message&.match?(/timed out|timeout/i)
:unreachable
else
:authentication_failure
end
end
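A hypothetical caller, to show the shape of the return value; `cluster` and `kubeclient` are stand-ins for a real cluster record and a configured client, not names taken from this change:

# Illustrative sketch: wrap any Kubeclient call and branch on the mapped status.
result = Gitlab::Kubernetes::KubeClient.graceful_request(cluster.id) do
  kubeclient.get_nodes
end

case result[:status]
when :connected
  result[:response] # the raw Kubeclient response
else
  [] # :unreachable, :authentication_failure or :unknown_failure; degrade gracefully
end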
# We disable redirects through 'http_max_redirects: 0', # We disable redirects through 'http_max_redirects: 0',
# so that KubeClient does not follow redirects and # so that KubeClient does not follow redirects and
# expose internal services. # expose internal services.
......
...@@ -586,6 +586,9 @@ msgid_plural "(%d closed)" ...@@ -586,6 +586,9 @@ msgid_plural "(%d closed)"
msgstr[0] "" msgstr[0] ""
msgstr[1] "" msgstr[1] ""
msgid "(%{linkStart}Cron syntax%{linkEnd})"
msgstr ""
msgid "(%{mrCount} merged)" msgid "(%{mrCount} merged)"
msgstr "" msgstr ""
...@@ -6417,9 +6420,6 @@ msgstr "" ...@@ -6417,9 +6420,6 @@ msgstr ""
msgid "Cron Timezone" msgid "Cron Timezone"
msgstr "" msgstr ""
msgid "Cron syntax"
msgstr ""
msgid "Crossplane" msgid "Crossplane"
msgstr "" msgstr ""
...@@ -8711,13 +8711,13 @@ msgstr "" ...@@ -8711,13 +8711,13 @@ msgstr ""
msgid "Every day" msgid "Every day"
msgstr "" msgstr ""
msgid "Every day (at 4:00am)" msgid "Every day (at %{time})"
msgstr "" msgstr ""
msgid "Every month" msgid "Every month"
msgstr "" msgstr ""
msgid "Every month (on the 1st at 4:00am)" msgid "Every month (Day %{day} at %{time})"
msgstr "" msgstr ""
msgid "Every three months" msgid "Every three months"
...@@ -8729,7 +8729,7 @@ msgstr "" ...@@ -8729,7 +8729,7 @@ msgstr ""
msgid "Every week" msgid "Every week"
msgstr "" msgstr ""
msgid "Every week (Sundays at 4:00am)" msgid "Every week (%{weekday} at %{time})"
msgstr "" msgstr ""
msgid "Everyone" msgid "Everyone"
...@@ -18591,6 +18591,9 @@ msgstr "" ...@@ -18591,6 +18591,9 @@ msgstr ""
msgid "Seats in license" msgid "Seats in license"
msgstr "" msgstr ""
msgid "Secondary"
msgstr ""
msgid "Secret" msgid "Secret"
msgstr "" msgstr ""
...@@ -21728,9 +21731,6 @@ msgstr "" ...@@ -21728,9 +21731,6 @@ msgstr ""
msgid "This is a list of devices that have logged into your account. Revoke any sessions that you do not recognize." msgid "This is a list of devices that have logged into your account. Revoke any sessions that you do not recognize."
msgstr "" msgstr ""
msgid "This is a primary node"
msgstr ""
msgid "This is a security log of important events involving your account." msgid "This is a security log of important events involving your account."
msgstr "" msgstr ""
......
...@@ -7,6 +7,7 @@ module QA ...@@ -7,6 +7,7 @@ module QA
let!(:runner) do let!(:runner) do
Resource::Runner.fabricate! do |runner| Resource::Runner.fabricate! do |runner|
runner.name = executor runner.name = executor
runner.tags = ['e2e-test']
end end
end end
......
...@@ -8,7 +8,7 @@ module QA ...@@ -8,7 +8,7 @@ module QA
let(:runner) do let(:runner) do
Resource::Runner.fabricate_via_api! do |runner| Resource::Runner.fabricate_via_api! do |runner|
runner.name = executor runner.name = executor
runner.run_untagged = true runner.tags = ['e2e-test']
end end
end end
...@@ -18,6 +18,7 @@ module QA ...@@ -18,6 +18,7 @@ module QA
mr.file_name = '.gitlab-ci.yml' mr.file_name = '.gitlab-ci.yml'
mr.file_content = <<~EOF mr.file_content = <<~EOF
test: test:
tags: [e2e-test]
script: script:
- echo '(66.67%) covered' - echo '(66.67%) covered'
EOF EOF
......
...@@ -14,9 +14,9 @@ describe 'Database schema' do ...@@ -14,9 +14,9 @@ describe 'Database schema' do
IGNORED_FK_COLUMNS = { IGNORED_FK_COLUMNS = {
abuse_reports: %w[reporter_id user_id], abuse_reports: %w[reporter_id user_id],
application_settings: %w[performance_bar_allowed_group_id slack_app_id snowplow_app_id eks_account_id eks_access_key_id], application_settings: %w[performance_bar_allowed_group_id slack_app_id snowplow_app_id eks_account_id eks_access_key_id],
approvers: %w[target_id user_id],
approvals: %w[user_id], approvals: %w[user_id],
approver_groups: %w[target_id], approver_groups: %w[target_id],
approvers: %w[target_id user_id],
audit_events: %w[author_id entity_id], audit_events: %w[author_id entity_id],
award_emoji: %w[awardable_id user_id], award_emoji: %w[awardable_id user_id],
aws_roles: %w[role_external_id], aws_roles: %w[role_external_id],
...@@ -29,12 +29,13 @@ describe 'Database schema' do ...@@ -29,12 +29,13 @@ describe 'Database schema' do
ci_trigger_requests: %w[commit_id], ci_trigger_requests: %w[commit_id],
cluster_providers_aws: %w[security_group_id vpc_id access_key_id], cluster_providers_aws: %w[security_group_id vpc_id access_key_id],
cluster_providers_gcp: %w[gcp_project_id operation_id], cluster_providers_gcp: %w[gcp_project_id operation_id],
commit_user_mentions: %w[commit_id],
deploy_keys_projects: %w[deploy_key_id], deploy_keys_projects: %w[deploy_key_id],
deployments: %w[deployable_id environment_id user_id], deployments: %w[deployable_id environment_id user_id],
draft_notes: %w[discussion_id commit_id], draft_notes: %w[discussion_id commit_id],
emails: %w[user_id], emails: %w[user_id],
events: %w[target_id],
epics: %w[updated_by_id last_edited_by_id state_id], epics: %w[updated_by_id last_edited_by_id state_id],
events: %w[target_id],
forked_project_links: %w[forked_from_project_id], forked_project_links: %w[forked_from_project_id],
geo_event_log: %w[hashed_storage_attachments_event_id], geo_event_log: %w[hashed_storage_attachments_event_id],
geo_job_artifact_deleted_events: %w[job_artifact_id], geo_job_artifact_deleted_events: %w[job_artifact_id],
...@@ -44,14 +45,14 @@ describe 'Database schema' do ...@@ -44,14 +45,14 @@ describe 'Database schema' do
geo_repository_deleted_events: %w[project_id], geo_repository_deleted_events: %w[project_id],
geo_upload_deleted_events: %w[upload_id model_id], geo_upload_deleted_events: %w[upload_id model_id],
gitlab_subscription_histories: %w[gitlab_subscription_id hosted_plan_id namespace_id], gitlab_subscription_histories: %w[gitlab_subscription_id hosted_plan_id namespace_id],
import_failures: %w[project_id],
identities: %w[user_id], identities: %w[user_id],
import_failures: %w[project_id],
issues: %w[last_edited_by_id state_id], issues: %w[last_edited_by_id state_id],
jira_tracker_data: %w[jira_issue_transition_id], jira_tracker_data: %w[jira_issue_transition_id],
keys: %w[user_id], keys: %w[user_id],
label_links: %w[target_id], label_links: %w[target_id],
lfs_objects_projects: %w[lfs_object_id project_id],
ldap_group_links: %w[group_id], ldap_group_links: %w[group_id],
lfs_objects_projects: %w[lfs_object_id project_id],
members: %w[source_id created_by_id], members: %w[source_id created_by_id],
merge_requests: %w[last_edited_by_id state_id], merge_requests: %w[last_edited_by_id state_id],
namespaces: %w[owner_id parent_id], namespaces: %w[owner_id parent_id],
...@@ -68,10 +69,11 @@ describe 'Database schema' do ...@@ -68,10 +69,11 @@ describe 'Database schema' do
repository_languages: %w[programming_language_id], repository_languages: %w[programming_language_id],
routes: %w[source_id], routes: %w[source_id],
sent_notifications: %w[project_id noteable_id recipient_id commit_id in_reply_to_discussion_id], sent_notifications: %w[project_id noteable_id recipient_id commit_id in_reply_to_discussion_id],
slack_integrations: %w[team_id user_id],
snippets: %w[author_id], snippets: %w[author_id],
spam_logs: %w[user_id], spam_logs: %w[user_id],
subscriptions: %w[user_id subscribable_id], subscriptions: %w[user_id subscribable_id],
slack_integrations: %w[team_id user_id], suggestions: %w[commit_id],
taggings: %w[tag_id taggable_id tagger_id], taggings: %w[tag_id taggable_id tagger_id],
timelogs: %w[user_id], timelogs: %w[user_id],
todos: %w[target_id commit_id], todos: %w[target_id commit_id],
...@@ -81,9 +83,7 @@ describe 'Database schema' do ...@@ -81,9 +83,7 @@ describe 'Database schema' do
users_star_projects: %w[user_id], users_star_projects: %w[user_id],
vulnerability_identifiers: %w[external_id], vulnerability_identifiers: %w[external_id],
vulnerability_scanners: %w[external_id], vulnerability_scanners: %w[external_id],
web_hooks: %w[service_id group_id], web_hooks: %w[service_id group_id]
suggestions: %w[commit_id],
commit_user_mentions: %w[commit_id]
}.with_indifferent_access.freeze }.with_indifferent_access.freeze
context 'for table' do context 'for table' do
......
...@@ -121,8 +121,10 @@ describe('new file modal component', () => { ...@@ -121,8 +121,10 @@ describe('new file modal component', () => {
}); });
describe('submitForm', () => { describe('submitForm', () => {
it('throws an error when target entry exists', () => { let store;
const store = createStore(); beforeEach(() => {
store = createStore();
store.state.entries = { store.state.entries = {
'test-path/test': { 'test-path/test': {
name: 'test', name: 'test',
...@@ -131,6 +133,9 @@ describe('new file modal component', () => { ...@@ -131,6 +133,9 @@ describe('new file modal component', () => {
}; };
vm = createComponentWithStore(Component, store).$mount(); vm = createComponentWithStore(Component, store).$mount();
});
it('throws an error when target entry exists', () => {
vm.open('rename', 'test-path/test'); vm.open('rename', 'test-path/test');
expect(createFlash).not.toHaveBeenCalled(); expect(createFlash).not.toHaveBeenCalled();
...@@ -146,5 +151,15 @@ describe('new file modal component', () => { ...@@ -146,5 +151,15 @@ describe('new file modal component', () => {
true, true,
); );
}); });
it('does not throw an error when target entry does not exist', () => {
jest.spyOn(vm, 'renameEntry').mockImplementation();
vm.open('rename', 'test-path/test');
vm.entryName = 'test-path/test2';
vm.submitForm();
expect(createFlash).not.toHaveBeenCalled();
});
}); });
}); });
import { shallowMount } from '@vue/test-utils'; import { shallowMount } from '@vue/test-utils';
import IntervalPatternInput from '~/pages/projects/pipeline_schedules/shared/components/interval_pattern_input.vue'; import IntervalPatternInput from '~/pages/projects/pipeline_schedules/shared/components/interval_pattern_input.vue';
const cronIntervalPresets = {
everyDay: '0 4 * * *',
everyWeek: '0 4 * * 0',
everyMonth: '0 4 1 * *',
};
describe('Interval Pattern Input Component', () => { describe('Interval Pattern Input Component', () => {
let oldWindowGl; let oldWindowGl;
let wrapper; let wrapper;
const mockHour = 4;
const mockWeekDayIndex = 1;
const mockDay = 1;
const cronIntervalPresets = {
everyDay: `0 ${mockHour} * * *`,
everyWeek: `0 ${mockHour} * * ${mockWeekDayIndex}`,
everyMonth: `0 ${mockHour} ${mockDay} * *`,
};
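With the mocked values above (hour 4, weekday index 1, day 1), the presets read as standard five-field cron expressions (minute, hour, day of month, month, day of week):

0 4 * * *    # every day at 04:00
0 4 * * 1    # every Monday at 04:00
0 4 1 * *    # at 04:00 on the 1st of every month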
const findEveryDayRadio = () => wrapper.find('#every-day'); const findEveryDayRadio = () => wrapper.find('#every-day');
const findEveryWeekRadio = () => wrapper.find('#every-week'); const findEveryWeekRadio = () => wrapper.find('#every-week');
const findEveryMonthRadio = () => wrapper.find('#every-month'); const findEveryMonthRadio = () => wrapper.find('#every-month');
...@@ -21,13 +25,20 @@ describe('Interval Pattern Input Component', () => { ...@@ -21,13 +25,20 @@ describe('Interval Pattern Input Component', () => {
const selectEveryMonthRadio = () => findEveryMonthRadio().setChecked(); const selectEveryMonthRadio = () => findEveryMonthRadio().setChecked();
const selectCustomRadio = () => findCustomRadio().trigger('click'); const selectCustomRadio = () => findCustomRadio().trigger('click');
const createWrapper = (props = {}) => { const createWrapper = (props = {}, data = {}) => {
if (wrapper) { if (wrapper) {
throw new Error('A wrapper already exists'); throw new Error('A wrapper already exists');
} }
wrapper = shallowMount(IntervalPatternInput, { wrapper = shallowMount(IntervalPatternInput, {
propsData: { ...props }, propsData: { ...props },
data() {
return {
randomHour: data?.hour || mockHour,
randomWeekDayIndex: mockWeekDayIndex,
randomDay: mockDay,
};
},
}); });
}; };
...@@ -47,39 +58,64 @@ describe('Interval Pattern Input Component', () => { ...@@ -47,39 +58,64 @@ describe('Interval Pattern Input Component', () => {
window.gl = oldWindowGl; window.gl = oldWindowGl;
}); });
describe('when prop initialCronInterval is passed', () => { describe('the input field defaults', () => {
describe('and prop initialCronInterval is custom', () => { beforeEach(() => {
beforeEach(() => { createWrapper();
createWrapper({ initialCronInterval: '1 2 3 4 5' }); });
});
it('the input is enabled', () => { it('to a non-empty string when no initial value is passed', () => {
expect(findCustomInput().attributes('disabled')).toBeUndefined(); expect(findCustomInput().element.value).not.toBe('');
});
}); });
});
describe('and prop initialCronInterval is a preset', () => { describe('the input field', () => {
beforeEach(() => { const initialCron = '0 * * * *';
createWrapper({ initialCronInterval: cronIntervalPresets.everyDay });
});
it('the input is disabled', () => { beforeEach(() => {
expect(findCustomInput().attributes('disabled')).toBe('disabled'); createWrapper({ initialCronInterval: initialCron });
}); });
it('is equal to the prop `initialCronInterval` when passed', () => {
expect(findCustomInput().element.value).toBe(initialCron);
}); });
}); });
describe('when prop initialCronInterval is not passed', () => { describe('The input field is enabled', () => {
beforeEach(() => { beforeEach(() => {
createWrapper(); createWrapper();
}); });
it('the input is enabled since custom is default value', () => { it('when a default option is selected', () => {
expect(findCustomInput().attributes('disabled')).toBeUndefined(); selectEveryDayRadio();
return wrapper.vm.$nextTick().then(() => {
expect(findCustomInput().attributes('disabled')).toBeUndefined();
});
});
it('when the custom option is selected', () => {
selectCustomRadio();
return wrapper.vm.$nextTick().then(() => {
expect(findCustomInput().attributes('disabled')).toBeUndefined();
});
}); });
}); });
describe('User Actions', () => { describe('formattedTime computed property', () => {
it.each`
desc | hour | expectedValue
${'returns a time in the afternoon if the value of `random time` is higher than 12'} | ${13} | ${'1:00pm'}
${'returns a time in the morning if the value of `random time` is lower than 12'} | ${11} | ${'11:00am'}
${'returns "12:00pm" if the value of `random time` is exactly 12'} | ${12} | ${'12:00pm'}
`('$desc', ({ hour, expectedValue }) => {
createWrapper({}, { hour });
expect(wrapper.vm.formattedTime).toBe(expectedValue);
});
});
describe('User Actions with radio buttons', () => {
it.each` it.each`
desc | initialCronInterval | act | expectedValue desc | initialCronInterval | act | expectedValue
${'when everyday is selected, update value'} | ${'1 2 3 4 5'} | ${selectEveryDayRadio} | ${cronIntervalPresets.everyDay} ${'when everyday is selected, update value'} | ${'1 2 3 4 5'} | ${selectEveryDayRadio} | ${cronIntervalPresets.everyDay}
...@@ -96,4 +132,23 @@ describe('Interval Pattern Input Component', () => { ...@@ -96,4 +132,23 @@ describe('Interval Pattern Input Component', () => {
}); });
}); });
}); });
describe('User actions with input field for Cron syntax', () => {
beforeEach(() => {
createWrapper();
});
it('when editing the cron input it selects the custom radio button', () => {
const newValue = '0 * * * *';
findCustomInput().setValue(newValue);
expect(wrapper.vm.cronInterval).toBe(newValue);
});
it('when value of input is one of the defaults, it selects the corresponding radio button', () => {
findCustomInput().setValue(cronIntervalPresets.everyWeek);
expect(wrapper.vm.cronInterval).toBe(cronIntervalPresets.everyWeek);
});
});
}); });
...@@ -488,6 +488,7 @@ Service: ...@@ -488,6 +488,7 @@ Service:
- confidential_note_events - confidential_note_events
- deployment_events - deployment_events
- description - description
- inherit_from_id
ProjectHook: ProjectHook:
- id - id
- url - url
......
...@@ -64,6 +64,45 @@ describe Gitlab::Kubernetes::KubeClient do ...@@ -64,6 +64,45 @@ describe Gitlab::Kubernetes::KubeClient do
end end
end end
describe '.graceful_request' do
context 'successful' do
before do
allow(client).to receive(:foo).and_return(true)
end
it 'returns connected status and foo response' do
result = described_class.graceful_request(1) { client.foo }
expect(result).to eq({ status: :connected, response: true })
end
end
context 'errored' do
using RSpec::Parameterized::TableSyntax
where(:error, :error_status) do
SocketError | :unreachable
OpenSSL::X509::CertificateError | :authentication_failure
StandardError | :unknown_failure
Kubeclient::HttpError.new(408, "timed out", nil) | :unreachable
Kubeclient::HttpError.new(408, "timeout", nil) | :unreachable
Kubeclient::HttpError.new(408, "", nil) | :authentication_failure
end
with_them do
before do
allow(client).to receive(:foo).and_raise(error)
end
it 'returns error status' do
result = described_class.graceful_request(1) { client.foo }
expect(result).to eq({ status: error_status })
end
end
end
end
describe '#initialize' do describe '#initialize' do
shared_examples 'local address' do shared_examples 'local address' do
it 'blocks local addresses' do it 'blocks local addresses' do
...@@ -188,10 +227,25 @@ describe Gitlab::Kubernetes::KubeClient do ...@@ -188,10 +227,25 @@ describe Gitlab::Kubernetes::KubeClient do
end end
end end
describe '#metrics_client' do
subject { client.metrics_client }
it_behaves_like 'a Kubeclient'
it 'has the metrics API group endpoint' do
expect(subject.api_endpoint.to_s).to match(%r{\/apis\/metrics.k8s.io\Z})
end
it 'has the api_version' do
expect(subject.instance_variable_get(:@api_version)).to eq('v1beta1')
end
end
describe 'core API' do describe 'core API' do
let(:core_client) { client.core_client } let(:core_client) { client.core_client }
[ [
:get_nodes,
:get_pods, :get_pods,
:get_secrets, :get_secrets,
:get_config_map, :get_config_map,
......
...@@ -4,11 +4,11 @@ require 'spec_helper' ...@@ -4,11 +4,11 @@ require 'spec_helper'
require Rails.root.join('db', 'post_migrate', '20200427064130_cleanup_optimistic_locking_nulls_pt2_fixed.rb') require Rails.root.join('db', 'post_migrate', '20200427064130_cleanup_optimistic_locking_nulls_pt2_fixed.rb')
describe CleanupOptimisticLockingNullsPt2Fixed, :migration do describe CleanupOptimisticLockingNullsPt2Fixed, :migration do
TABLES = %w(ci_stages ci_builds ci_pipelines).freeze test_tables = %w(ci_stages ci_builds ci_pipelines).freeze
TABLES.each do |table| test_tables.each do |table|
let(table.to_sym) { table(table.to_sym) } let(table.to_sym) { table(table.to_sym) }
end end
let(:tables) { TABLES.map { |t| method(t.to_sym).call } } let(:tables) { test_tables.map { |t| method(t.to_sym).call } }
before do before do
# Create necessary rows # Create necessary rows
......
# frozen_string_literal: true
require 'spec_helper'
require Rails.root.join('db', 'migrate', '20200513235532_fill_file_store_ci_job_artifacts.rb')
describe FillFileStoreCiJobArtifacts do
let(:namespaces) { table(:namespaces) }
let(:projects) { table(:projects) }
let(:builds) { table(:ci_builds) }
let(:job_artifacts) { table(:ci_job_artifacts) }
before do
namespaces.create!(id: 123, name: 'sample', path: 'sample')
projects.create!(id: 123, name: 'sample', path: 'sample', namespace_id: 123)
builds.create!(id: 1)
end
context 'when file_store is nil' do
it 'updates file_store to local' do
job_artifacts.create!(project_id: 123, job_id: 1, file_type: 1, file_store: nil)
job_artifact = job_artifacts.find_by(project_id: 123, job_id: 1)
expect { migrate! }.to change { job_artifact.reload.file_store }.from(nil).to(1)
end
end
context 'when file_store is set to local' do
it 'does not update file_store' do
job_artifacts.create!(project_id: 123, job_id: 1, file_type: 1, file_store: 1)
job_artifact = job_artifacts.find_by(project_id: 123, job_id: 1)
expect { migrate! }.not_to change { job_artifact.reload.file_store }
end
end
context 'when file_store is set to object storage' do
it 'does not update file_store' do
job_artifacts.create!(project_id: 123, job_id: 1, file_type: 1, file_store: 2)
job_artifact = job_artifacts.find_by(project_id: 123, job_id: 1)
expect { migrate! }.not_to change { job_artifact.reload.file_store }
end
end
end
# frozen_string_literal: true
require 'spec_helper'
require Rails.root.join('db', 'migrate', '20200513234502_fill_file_store_lfs_objects.rb')
describe FillFileStoreLfsObjects do
let(:lfs_objects) { table(:lfs_objects) }
let(:oid) { 'b804383982bb89b00e828e3f44c038cc991d3d1768009fc39ba8e2c081b9fb75' }
context 'when file_store is nil' do
it 'updates file_store to local' do
lfs_objects.create(oid: oid, size: 1062, file_store: nil)
lfs_object = lfs_objects.find_by(oid: oid)
expect { migrate! }.to change { lfs_object.reload.file_store }.from(nil).to(1)
end
end
context 'when file_store is set to local' do
it 'does not update file_store' do
lfs_objects.create(oid: oid, size: 1062, file_store: 1)
lfs_object = lfs_objects.find_by(oid: oid)
expect { migrate! }.not_to change { lfs_object.reload.file_store }
end
end
context 'when file_store is set to object storage' do
it 'does not update file_store' do
lfs_objects.create(oid: oid, size: 1062, file_store: 2)
lfs_object = lfs_objects.find_by(oid: oid)
expect { migrate! }.not_to change { lfs_object.reload.file_store }
end
end
end
# frozen_string_literal: true
require 'spec_helper'
require Rails.root.join('db', 'migrate', '20200513235347_fill_store_uploads.rb')
describe FillStoreUploads do
let(:uploads) { table(:uploads) }
let(:path) { 'uploads/-/system/avatar.jpg' }
context 'when store is nil' do
it 'updates store to local' do
uploads.create(size: 100.kilobytes,
uploader: 'AvatarUploader',
path: path,
store: nil)
upload = uploads.find_by(path: path)
expect { migrate! }.to change { upload.reload.store }.from(nil).to(1)
end
end
context 'when store is set to local' do
it 'does not update store' do
uploads.create(size: 100.kilobytes,
uploader: 'AvatarUploader',
path: path,
store: 1)
upload = uploads.find_by(path: path)
expect { migrate! }.not_to change { upload.reload.store }
end
end
context 'when store is set to object storage' do
it 'does not update store' do
uploads.create(size: 100.kilobytes,
uploader: 'AvatarUploader',
path: path,
store: 2)
upload = uploads.find_by(path: path)
expect { migrate! }.not_to change { upload.reload.store }
end
end
end
...@@ -378,19 +378,6 @@ describe Ci::JobArtifact do ...@@ -378,19 +378,6 @@ describe Ci::JobArtifact do
describe 'file is being stored' do describe 'file is being stored' do
subject { create(:ci_job_artifact, :archive) } subject { create(:ci_job_artifact, :archive) }
context 'when object has nil store' do
before do
subject.update_column(:file_store, nil)
subject.reload
end
it 'is stored locally' do
expect(subject.file_store).to be(nil)
expect(subject.file).to be_file_storage
expect(subject.file.object_store).to eq(ObjectStorage::Store::LOCAL)
end
end
context 'when existing object has local store' do context 'when existing object has local store' do
it 'is stored locally' do it 'is stored locally' do
expect(subject.file_store).to be(ObjectStorage::Store::LOCAL) expect(subject.file_store).to be(ObjectStorage::Store::LOCAL)
......
...@@ -948,6 +948,22 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -948,6 +948,22 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
end end
end end
describe '#nodes' do
let(:cluster) { create(:cluster) }
subject { cluster.nodes }
it { is_expected.to be_nil }
context 'with a cached status' do
before do
stub_reactive_cache(cluster, nodes: [kube_node])
end
it { is_expected.to eq([kube_node]) }
end
end
describe '#calculate_reactive_cache' do describe '#calculate_reactive_cache' do
subject { cluster.calculate_reactive_cache } subject { cluster.calculate_reactive_cache }
...@@ -956,6 +972,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -956,6 +972,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
it 'does not populate the cache' do it 'does not populate the cache' do
expect(cluster).not_to receive(:retrieve_connection_status) expect(cluster).not_to receive(:retrieve_connection_status)
expect(cluster).not_to receive(:retrieve_nodes)
is_expected.to be_nil is_expected.to be_nil
end end
...@@ -964,12 +981,12 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -964,12 +981,12 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
context 'cluster is enabled' do context 'cluster is enabled' do
let(:cluster) { create(:cluster, :provided_by_user, :group) } let(:cluster) { create(:cluster, :provided_by_user, :group) }
context 'connection to the cluster is successful' do before do
before do stub_kubeclient_nodes_and_nodes_metrics(cluster.platform.api_url)
stub_kubeclient_discover(cluster.platform.api_url) end
end
it { is_expected.to eq(connection_status: :connected) } context 'connection to the cluster is successful' do
it { is_expected.to eq(connection_status: :connected, nodes: [kube_node.merge(kube_node_metrics)]) }
end end
context 'cluster cannot be reached' do context 'cluster cannot be reached' do
...@@ -978,7 +995,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -978,7 +995,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
.and_raise(SocketError) .and_raise(SocketError)
end end
it { is_expected.to eq(connection_status: :unreachable) } it { is_expected.to eq(connection_status: :unreachable, nodes: []) }
end end
context 'cluster cannot be authenticated to' do context 'cluster cannot be authenticated to' do
...@@ -987,7 +1004,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -987,7 +1004,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
.and_raise(OpenSSL::X509::CertificateError.new("Certificate error")) .and_raise(OpenSSL::X509::CertificateError.new("Certificate error"))
end end
it { is_expected.to eq(connection_status: :authentication_failure) } it { is_expected.to eq(connection_status: :authentication_failure, nodes: []) }
end end
describe 'Kubeclient::HttpError' do describe 'Kubeclient::HttpError' do
...@@ -999,18 +1016,18 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -999,18 +1016,18 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
.and_raise(Kubeclient::HttpError.new(error_code, error_message, nil)) .and_raise(Kubeclient::HttpError.new(error_code, error_message, nil))
end end
it { is_expected.to eq(connection_status: :authentication_failure) } it { is_expected.to eq(connection_status: :authentication_failure, nodes: []) }
context 'generic timeout' do context 'generic timeout' do
let(:error_message) { 'Timed out connecting to server'} let(:error_message) { 'Timed out connecting to server'}
it { is_expected.to eq(connection_status: :unreachable) } it { is_expected.to eq(connection_status: :unreachable, nodes: []) }
end end
context 'gateway timeout' do context 'gateway timeout' do
let(:error_message) { '504 Gateway Timeout for GET https://kubernetes.example.com/api/v1'} let(:error_message) { '504 Gateway Timeout for GET https://kubernetes.example.com/api/v1'}
it { is_expected.to eq(connection_status: :unreachable) } it { is_expected.to eq(connection_status: :unreachable, nodes: []) }
end end
end end
...@@ -1020,11 +1037,12 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -1020,11 +1037,12 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
.and_raise(StandardError) .and_raise(StandardError)
end end
it { is_expected.to eq(connection_status: :unknown_failure) } it { is_expected.to eq(connection_status: :unknown_failure, nodes: []) }
it 'notifies Sentry' do it 'notifies Sentry' do
expect(Gitlab::ErrorTracking).to receive(:track_exception) expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(instance_of(StandardError), hash_including(cluster_id: cluster.id)) .with(instance_of(StandardError), hash_including(cluster_id: cluster.id))
.twice
subject subject
end end
......
...@@ -35,7 +35,7 @@ describe API::Metrics::Dashboard::Annotations do ...@@ -35,7 +35,7 @@ describe API::Metrics::Dashboard::Annotations do
context 'with invalid parameters' do context 'with invalid parameters' do
it 'returns error message' do it 'returns error message' do
post api(url, user), params: { dashboard_path: nil, starting_at: nil, description: nil } post api(url, user), params: { dashboard_path: '', starting_at: nil, description: nil }
expect(response).to have_gitlab_http_status(:bad_request) expect(response).to have_gitlab_http_status(:bad_request)
expect(json_response['message']).to include({ "starting_at" => ["can't be blank"], "description" => ["can't be blank"], "dashboard_path" => ["can't be blank"] }) expect(json_response['message']).to include({ "starting_at" => ["can't be blank"], "description" => ["can't be blank"], "dashboard_path" => ["can't be blank"] })
...@@ -53,6 +53,41 @@ describe API::Metrics::Dashboard::Annotations do ...@@ -53,6 +53,41 @@ describe API::Metrics::Dashboard::Annotations do
post api(url, user), params: params post api(url, user), params: params
end end
end end
context 'with special characters in dashboard_path in request body' do
let(:dashboard_escaped) { 'config/prometheus/common_metrics%26copy.yml' }
let(:dashboard_unescaped) { 'config/prometheus/common_metrics&copy.yml' }
shared_examples 'special characters unescaped' do
let(:expected_params) do
{
'starting_at' => starting_at.to_time,
'ending_at' => ending_at.to_time,
"#{source_type}" => source,
'dashboard_path' => dashboard_unescaped,
'description' => params[:description]
}
end
it 'unescapes the dashboard_path', :aggregate_failures do
expect(::Metrics::Dashboard::Annotations::CreateService).to receive(:new).with(user, expected_params)
post api(url, user), params: params
end
end
context 'with escaped characters' do
it_behaves_like 'special characters unescaped' do
let(:dashboard) { dashboard_escaped }
end
end
context 'with unescaped characters' do
it_behaves_like 'special characters unescaped' do
let(:dashboard) { dashboard_unescaped }
end
end
end
end end
context 'without correct permissions' do context 'without correct permissions' do
......
...@@ -3,23 +3,41 @@ ...@@ -3,23 +3,41 @@
require 'spec_helper' require 'spec_helper'
describe ClusterSerializer do describe ClusterSerializer do
let(:cluster) { create(:cluster, :project, provider_type: :user) }
describe '#represent_list' do
subject { described_class.new.represent_list(cluster).keys }
it 'serializes attrs correctly' do
is_expected.to contain_exactly(
:cluster_type,
:enabled,
:environment_scope,
:name,
:nodes,
:path,
:status)
end
end
describe '#represent_status' do describe '#represent_status' do
subject { described_class.new.represent_status(cluster) } subject { described_class.new.represent_status(cluster).keys }
context 'when provider type is gcp and cluster is errored' do
let(:cluster) do
errored_provider = create(:cluster_provider_gcp, :errored)
context 'when provider type is gcp' do create(:cluster, provider_type: :gcp, provider_gcp: errored_provider)
let(:cluster) { create(:cluster, provider_type: :gcp, provider_gcp: provider) } end
let(:provider) { create(:cluster_provider_gcp, :errored) }
it 'serializes only status' do it 'serializes attrs correctly' do
expect(subject.keys).to contain_exactly(:status, :status_reason, :applications) is_expected.to contain_exactly(:status, :status_reason, :applications)
end end
end end
context 'when provider type is user' do context 'when provider type is user' do
let(:cluster) { create(:cluster, provider_type: :user) } it 'serializes attrs correctly' do
is_expected.to contain_exactly(:status, :status_reason, :applications)
it 'serializes only status' do
expect(subject.keys).to contain_exactly(:status, :status_reason, :applications)
end end
end end
end end
......
...@@ -94,6 +94,31 @@ describe MergeRequests::RefreshService do ...@@ -94,6 +94,31 @@ describe MergeRequests::RefreshService do
expect(@fork_build_failed_todo).to be_done expect(@fork_build_failed_todo).to be_done
end end
context 'when a merge error exists' do
let(:error_message) { 'This is a merge error' }
before do
@merge_request = create(:merge_request,
source_project: @project,
source_branch: 'feature',
target_branch: 'master',
target_project: @project,
merge_error: error_message)
end
it 'clears merge errors when pushing to the source branch' do
expect { refresh_service.execute(@oldrev, @newrev, 'refs/heads/feature') }
.to change { @merge_request.reload.merge_error }
.from(error_message)
.to(nil)
end
it 'does not clear merge errors when pushing to the target branch' do
expect { refresh_service.execute(@oldrev, @newrev, 'refs/heads/master') }
.not_to change { @merge_request.reload.merge_error }
end
end
it 'reloads source branch MRs memoization' do it 'reloads source branch MRs memoization' do
refresh_service.execute(@oldrev, @newrev, 'refs/heads/master') refresh_service.execute(@oldrev, @newrev, 'refs/heads/master')
......
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
module KubernetesHelpers module KubernetesHelpers
include Gitlab::Kubernetes include Gitlab::Kubernetes
NODE_NAME = "gke-cluster-applications-default-pool-49b7f225-v527"
def kube_response(body) def kube_response(body)
{ body: body.to_json } { body: body.to_json }
end end
...@@ -11,6 +13,14 @@ module KubernetesHelpers ...@@ -11,6 +13,14 @@ module KubernetesHelpers
kube_response(kube_pods_body) kube_response(kube_pods_body)
end end
def nodes_response
kube_response(nodes_body)
end
def nodes_metrics_response
kube_response(nodes_metrics_body)
end
def kube_pod_response def kube_pod_response
kube_response(kube_pod) kube_response(kube_pod)
end end
...@@ -34,6 +44,9 @@ module KubernetesHelpers ...@@ -34,6 +44,9 @@ module KubernetesHelpers
WebMock WebMock
.stub_request(:get, api_url + '/apis/rbac.authorization.k8s.io/v1') .stub_request(:get, api_url + '/apis/rbac.authorization.k8s.io/v1')
.to_return(kube_response(kube_v1_rbac_authorization_discovery_body)) .to_return(kube_response(kube_v1_rbac_authorization_discovery_body))
WebMock
.stub_request(:get, api_url + '/apis/metrics.k8s.io/v1beta1')
.to_return(kube_response(kube_metrics_v1beta1_discovery_body))
end end
def stub_kubeclient_discover_istio(api_url) def stub_kubeclient_discover_istio(api_url)
...@@ -76,6 +89,22 @@ module KubernetesHelpers ...@@ -76,6 +89,22 @@ module KubernetesHelpers
WebMock.stub_request(:get, pods_url).to_return(response || kube_pods_response) WebMock.stub_request(:get, pods_url).to_return(response || kube_pods_response)
end end
def stub_kubeclient_nodes(api_url)
stub_kubeclient_discover_base(api_url)
nodes_url = api_url + "/api/v1/nodes"
WebMock.stub_request(:get, nodes_url).to_return(nodes_response)
end
def stub_kubeclient_nodes_and_nodes_metrics(api_url)
stub_kubeclient_nodes(api_url)
nodes_url = api_url + "/apis/metrics.k8s.io/v1beta1/nodes"
WebMock.stub_request(:get, nodes_url).to_return(nodes_metrics_response)
end
def stub_kubeclient_pods(namespace, status: nil) def stub_kubeclient_pods(namespace, status: nil)
stub_kubeclient_discover(service.api_url) stub_kubeclient_discover(service.api_url)
pods_url = service.api_url + "/api/v1/namespaces/#{namespace}/pods" pods_url = service.api_url + "/api/v1/namespaces/#{namespace}/pods"
...@@ -254,6 +283,7 @@ module KubernetesHelpers ...@@ -254,6 +283,7 @@ module KubernetesHelpers
{ {
"kind" => "APIResourceList", "kind" => "APIResourceList",
"resources" => [ "resources" => [
{ "name" => "nodes", "namespaced" => false, "kind" => "Node" },
{ "name" => "pods", "namespaced" => true, "kind" => "Pod" }, { "name" => "pods", "namespaced" => true, "kind" => "Pod" },
{ "name" => "deployments", "namespaced" => true, "kind" => "Deployment" }, { "name" => "deployments", "namespaced" => true, "kind" => "Deployment" },
{ "name" => "secrets", "namespaced" => true, "kind" => "Secret" }, { "name" => "secrets", "namespaced" => true, "kind" => "Secret" },
...@@ -314,6 +344,16 @@ module KubernetesHelpers ...@@ -314,6 +344,16 @@ module KubernetesHelpers
} }
end end
def kube_metrics_v1beta1_discovery_body
{
"kind" => "APIResourceList",
"resources" => [
{ "name" => "nodes", "namespaced" => false, "kind" => "NodeMetrics" },
{ "name" => "pods", "namespaced" => true, "kind" => "PodMetrics" }
]
}
end
def kube_istio_discovery_body def kube_istio_discovery_body
{ {
"kind" => "APIResourceList", "kind" => "APIResourceList",
...@@ -442,6 +482,20 @@ module KubernetesHelpers ...@@ -442,6 +482,20 @@ module KubernetesHelpers
} }
end end
def nodes_body
{
"kind" => "NodeList",
"items" => [kube_node]
}
end
def nodes_metrics_body
{
"kind" => "List",
"items" => [kube_node_metrics]
}
end
def kube_logs_body def kube_logs_body
"2019-12-13T14:04:22.123456Z Log 1\n2019-12-13T14:04:23.123456Z Log 2\n2019-12-13T14:04:24.123456Z Log 3" "2019-12-13T14:04:22.123456Z Log 1\n2019-12-13T14:04:23.123456Z Log 2\n2019-12-13T14:04:24.123456Z Log 3"
end end
...@@ -494,6 +548,40 @@ module KubernetesHelpers ...@@ -494,6 +548,40 @@ module KubernetesHelpers
} }
end end
# This is a partial response; it will have many more elements in reality,
# but these are the ones we care about at the moment.
def kube_node
{
"metadata" => {
"name" => NODE_NAME
},
"status" => {
"capacity" => {
"cpu" => "2",
"memory" => "7657228Ki"
},
"allocatable" => {
"cpu" => "1930m",
"memory" => "5777164Ki"
}
}
}
end
# This is a partial response; it will have many more elements in reality,
# but these are the ones we care about at the moment.
def kube_node_metrics
{
"metadata" => {
"name" => NODE_NAME
},
"usage" => {
"cpu" => "144208668n",
"memory" => "1789048Ki"
}
}
end
# Similar to a kube_pod, but should contain a running service # Similar to a kube_pod, but should contain a running service
def kube_knative_pod(name: "kube-pod", namespace: "default", status: "Running") def kube_knative_pod(name: "kube-pod", namespace: "default", status: "Running")
{ {
......
...@@ -71,7 +71,7 @@ shared_examples 'error tracking show page' do ...@@ -71,7 +71,7 @@ shared_examples 'error tracking show page' do
expect(page).to have_content('Stack trace') expect(page).to have_content('Stack trace')
end end
it 'renders the stack trace' do it 'renders the stack trace', :quarantine do
event_response['entries'][0]['data']['values'][0]['stacktrace']['frames'].each do |frame| event_response['entries'][0]['data']['values'][0]['stacktrace']['frames'].each do |frame|
expect(frame['filename']).not_to be_nil expect(frame['filename']).not_to be_nil
expect(page).to have_content(frame['filename']) expect(page).to have_content(frame['filename'])
......
...@@ -22,18 +22,6 @@ describe 'gitlab:artifacts namespace rake task' do ...@@ -22,18 +22,6 @@ describe 'gitlab:artifacts namespace rake task' do
context 'when local storage is used' do context 'when local storage is used' do
let(:store) { ObjectStorage::Store::LOCAL } let(:store) { ObjectStorage::Store::LOCAL }
context 'and job does not have file store defined' do
let(:object_storage_enabled) { true }
let(:store) { nil }
it "migrates file to remote storage" do
subject
expect(artifact.reload.file_store).to eq(ObjectStorage::Store::REMOTE)
expect(job_trace.reload.file_store).to eq(ObjectStorage::Store::REMOTE)
end
end
context 'and remote storage is defined' do context 'and remote storage is defined' do
let(:object_storage_enabled) { true } let(:object_storage_enabled) { true }
......