Commit db061f44 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

Parent 7a734536
......@@ -14,11 +14,8 @@ const DEFAULT_SNOWPLOW_OPTIONS = {
linkClickTracking: false,
};
const eventHandler = (e, func, opts = {}) => {
const el = e.target.closest('[data-track-event]');
const action = el && el.dataset.trackEvent;
if (!action) return;
const createEventPayload = (el, { suffix = '' } = {}) => {
const action = el.dataset.trackEvent + (suffix || '');
let value = el.dataset.trackValue || el.value || undefined;
if (el.type === 'checkbox' && !el.checked) value = false;
......@@ -29,7 +26,19 @@ const eventHandler = (e, func, opts = {}) => {
context: el.dataset.trackContext,
};
func(opts.category, action + (opts.suffix || ''), omitBy(data, isUndefined));
return {
action,
data: omitBy(data, isUndefined),
};
};
const eventHandler = (e, func, opts = {}) => {
const el = e.target.closest('[data-track-event]');
if (!el) return;
const { action, data } = createEventPayload(el, opts);
func(opts.category, action, data);
};
const eventHandlers = (category, func) => {
......@@ -62,17 +71,30 @@ export default class Tracking {
return window.snowplow('trackStructEvent', category, action, label, property, value, contexts);
}
static bindDocument(category = document.body.dataset.page, documentOverride = null) {
const el = documentOverride || document;
if (!this.enabled() || el.trackingBound) return [];
static bindDocument(category = document.body.dataset.page, parent = document) {
if (!this.enabled() || parent.trackingBound) return [];
el.trackingBound = true;
// eslint-disable-next-line no-param-reassign
parent.trackingBound = true;
const handlers = eventHandlers(category, (...args) => this.event(...args));
handlers.forEach(event => el.addEventListener(event.name, event.func));
handlers.forEach(event => parent.addEventListener(event.name, event.func));
return handlers;
}
static trackLoadEvents(category = document.body.dataset.page, parent = document) {
if (!this.enabled()) return [];
const loadEvents = parent.querySelectorAll('[data-track-event="render"]');
loadEvents.forEach(element => {
const { action, data } = createEventPayload(element);
this.event(category, action, data);
});
return loadEvents;
}
static mixin(opts = {}) {
return {
computed: {
......@@ -111,6 +133,7 @@ export function initUserTracking() {
if (opts.linkClickTracking) window.snowplow('enableLinkClickTracking');
Tracking.bindDocument();
Tracking.trackLoadEvents();
document.dispatchEvent(new Event('SnowplowInitialized'));
}
......@@ -10,8 +10,9 @@ class Projects::WikisController < Projects::ApplicationController
before_action :authorize_admin_wiki!, only: :destroy
before_action :load_project_wiki
before_action :load_page, only: [:show, :edit, :update, :history, :destroy]
before_action :valid_encoding?,
if: -> { %w[show edit update].include?(action_name) && load_page }
before_action only: [:show, :edit, :update] do
@valid_encoding = valid_encoding?
end
before_action only: [:edit, :update], unless: :valid_encoding? do
redirect_to(project_wiki_path(@project, @page))
end
......@@ -167,7 +168,11 @@ class Projects::WikisController < Projects::ApplicationController
end
def load_page
@page ||= @project_wiki.find_page(*page_params)
@page ||= find_page
end
def find_page
@project_wiki.find_page(*page_params)
end
def page_params
......@@ -178,9 +183,11 @@ class Projects::WikisController < Projects::ApplicationController
end
def valid_encoding?
strong_memoize(:valid_encoding) do
@page.content.encoding == Encoding::UTF_8
end
page_encoding == Encoding::UTF_8
end
def page_encoding
strong_memoize(:page_encoding) { @page&.content&.encoding }
end
def set_encoding_error
......
......@@ -215,6 +215,8 @@ module CommitsHelper
def commit_path(project, commit, merge_request: nil)
if merge_request&.persisted?
diffs_project_merge_request_path(project, merge_request, commit_id: commit.id)
elsif merge_request
project_commit_path(merge_request.source_project, commit)
else
project_commit_path(project, commit)
end
......
# frozen_string_literal: true
module CycleAnalytics
class GroupLevel
include LevelBase
attr_reader :options, :group
def initialize(group:, options:)
@group = group
@options = options.merge(group: group)
end
def summary
@summary ||= ::Gitlab::CycleAnalytics::GroupStageSummary.new(group, options: options).data
end
def permissions(*)
STAGES.each_with_object({}) do |stage, obj|
obj[stage] = true
end
end
def stats
@stats ||= STAGES.map do |stage_name|
self[stage_name].as_json(serializer: GroupAnalyticsStageSerializer)
end
end
end
end
---
title: Set NULL `lock_version` values to 0 for CI objects
merge_request: 25396
author:
type: fixed
---
title: Eliminate errors in wiki controller during edit
merge_request: 29645
author:
type: fixed
---
title: Fix number of approvals given calculation
merge_request: 28293
author: Steffen Köhler
type: fixed
---
title: 'fix: use the source project to generate commit links for un-persisted merge requests'
merge_request: 29243
author: Chieh-Min Wang
type: fixed
......@@ -31,7 +31,7 @@ class CleanupOptimisticLockingNulls < ActiveRecord::Migration[5.2]
'CleanupOptimisticLockingNulls',
2.minutes,
batch_size: BATCH_SIZE,
other_job_arguments: [table]
other_arguments: [table]
)
end
end
......
# frozen_string_literal: true
# See http://doc.gitlab.com/ce/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class CleanupOptimisticLockingNullsPt2 < ActiveRecord::Migration[5.2]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
TABLES = %w(ci_stages ci_builds ci_pipelines).freeze
BATCH_SIZE = 10_000
def declare_class(table)
Class.new(ActiveRecord::Base) do
include EachBatch
self.table_name = table
self.inheritance_column = :_type_disabled # Disable STI
end
end
def up
last_table_final_delay = 0
TABLES.each do |table|
add_concurrent_index table.to_sym, :lock_version, where: "lock_version IS NULL"
last_table_final_delay = queue_background_migration_jobs_by_range_at_intervals(
declare_class(table).where(lock_version: nil),
'CleanupOptimisticLockingNulls',
2.minutes,
batch_size: BATCH_SIZE,
other_job_arguments: [table],
initial_delay: last_table_final_delay
)
end
end
def down
TABLES.each do |table|
remove_concurrent_index table.to_sym, :lock_version, where: "lock_version IS NULL"
end
end
end
......@@ -8862,8 +8862,6 @@ CREATE INDEX index_ci_builds_on_commit_id_and_type_and_name_and_ref ON public.ci
CREATE INDEX index_ci_builds_on_commit_id_and_type_and_ref ON public.ci_builds USING btree (commit_id, type, ref);
CREATE INDEX index_ci_builds_on_lock_version ON public.ci_builds USING btree (lock_version) WHERE (lock_version IS NULL);
CREATE INDEX index_ci_builds_on_name_and_security_type_eq_ci_build ON public.ci_builds USING btree (name, id) WHERE (((name)::text = ANY (ARRAY[('container_scanning'::character varying)::text, ('dast'::character varying)::text, ('dependency_scanning'::character varying)::text, ('license_management'::character varying)::text, ('sast'::character varying)::text, ('license_scanning'::character varying)::text])) AND ((type)::text = 'Ci::Build'::text));
CREATE INDEX index_ci_builds_on_project_id_and_id ON public.ci_builds USING btree (project_id, id);
......@@ -8940,8 +8938,6 @@ CREATE INDEX index_ci_pipelines_on_auto_canceled_by_id ON public.ci_pipelines US
CREATE INDEX index_ci_pipelines_on_external_pull_request_id ON public.ci_pipelines USING btree (external_pull_request_id) WHERE (external_pull_request_id IS NOT NULL);
CREATE INDEX index_ci_pipelines_on_lock_version ON public.ci_pipelines USING btree (lock_version) WHERE (lock_version IS NULL);
CREATE INDEX index_ci_pipelines_on_merge_request_id ON public.ci_pipelines USING btree (merge_request_id) WHERE (merge_request_id IS NOT NULL);
CREATE INDEX index_ci_pipelines_on_pipeline_schedule_id ON public.ci_pipelines USING btree (pipeline_schedule_id);
......@@ -9010,8 +9006,6 @@ CREATE INDEX index_ci_sources_projects_on_pipeline_id ON public.ci_sources_proje
CREATE UNIQUE INDEX index_ci_sources_projects_on_source_project_id_and_pipeline_id ON public.ci_sources_projects USING btree (source_project_id, pipeline_id);
CREATE INDEX index_ci_stages_on_lock_version ON public.ci_stages USING btree (lock_version) WHERE (lock_version IS NULL);
CREATE INDEX index_ci_stages_on_pipeline_id ON public.ci_stages USING btree (pipeline_id);
CREATE UNIQUE INDEX index_ci_stages_on_pipeline_id_and_name ON public.ci_stages USING btree (pipeline_id, name);
......@@ -13070,7 +13064,6 @@ COPY "schema_migrations" (version) FROM STDIN;
20200214214934
20200215222507
20200215225103
20200217210353
20200217223651
20200217225719
20200218113721
......
---
# Checks for use of some of the top misused terms at GitLab.
#
# For a list of all options, see https://errata-ai.github.io/vale/styles/
extends: substitution
message: 'Use the American spelling "%s" instead of the British "%s".'
link: https://about.gitlab.com/handbook/communication/#top-misused-terms
level: error
ignorecase: true
swap:
aeon: eon
aeroplane: airplane
ageing: aging
aluminium: aluminum
anaemia: anemia
anaesthesia: anesthesia
analyse: analyze
annexe: annex
apologise: apologize
behaviour: behavior
busses: buses
calibre: caliber
centre: center
cheque: check
civilisation: civilization
civilise: civilize
colour: color
cosy: cozy
cypher: cipher
dependant: dependent
defence: defense
distil: distill
draught: draft
encyclopaedia: encyclopedia
enquiry: inquiry
enrol: enroll
enrolment: enrollment
enthral: enthrall
# equalled: equaled // Under discussion
# equalling: equaling // Under discussion
favourite: favorite
fibre: fiber
fillet: filet
flavour: flavor
furore: furor
fulfil: fulfill
gaol: jail
grey: gray
humour: humor
honour: honor
initialled: initialed
initialling: initialing
instil: instill
jewellery: jewelry
labelling: labeling
labelled: labeled
labour: labor
libellous: libelous
licence: license
likeable: likable
liveable: livable
lustre: luster
manoeuvre: maneuver
marvellous: marvelous
matt: matte
meagre: meager
metre: meter
mitre: miter
modelling: modeling
moustache: mustache
neighbour: neighbor
normalise: normalize
offence: offense
organise: organize
orientated: oriented
paralyse: paralyze
plough: plow
pretence: pretense
programme: program
pyjamas: pajamas
rateable: ratable
realise: realize
recognise: recognize
reconnoitre: reconnoiter
rumour: rumor
sabre: saber
saleable: salable
saltpetre: saltpeter
sceptic: skeptic
sepulchre: sepulcher
signalling: signaling
sizeable: sizable
skilful: skillful
sombre: somber
smoulder: smolder
speciality: specialty
spectre: specter
splendour: splendor
sulphur: sulfur
theatre: theater
travelled: traveled
traveller: traveler
travelling: traveling
unshakeable: unshakable
wilful: willful
yoghurt: yogurt
......@@ -246,7 +246,7 @@ is in sync with the primary. If the data is determined to be recent enough the
secondary can be used; otherwise it will be ignored. To reduce the overhead of
these checks, we only perform them at certain intervals.
There are three configuration options that influence this behaviour:
There are three configuration options that influence this behavior:
| Option | Description | Default |
|------------------------------|----------------------------------------------------------------------------------------------------------------|------------|
......
......@@ -54,14 +54,14 @@ Feature.enable('geo_repository_verification')
Navigate to the **{admin}** **Admin Area >** **{location-dot}** **Geo** dashboard on the **primary** node and expand
the **Verification information** tab for that node to view automatic checksumming
status for repositories and wikis. Successes are shown in green, pending work
in grey, and failures in red.
in gray, and failures in red.
![Verification status](img/verification-status-primary.png)
Navigate to the **{admin}** **Admin Area >** **{location-dot}** **Geo** dashboard on the **secondary** node and expand
the **Verification information** tab for that node to view automatic verification
status for repositories and wikis. As with checksumming, successes are shown in
green, pending work in grey, and failures in red.
green, pending work in gray, and failures in red.
![Verification status](img/verification-status-secondary.png)
......
......@@ -95,7 +95,7 @@ ensure these processes are close to 100% as possible during active use.
Navigate to the **{admin}** **Admin Area >** **{location-dot}** **Geo** dashboard on the **secondary** node to
review status. Replicated objects (shown in green) should be close to 100%,
and there should be no failures (shown in red). If a large proportion of
objects aren't yet replicated (shown in grey), consider giving the node more
objects aren't yet replicated (shown in gray), consider giving the node more
time to complete.
![Replication status](img/replication-status.png)
......
......@@ -184,7 +184,7 @@ The NFS man page states:
Read the [Linux man page](https://linux.die.net/man/5/nfs) to understand the difference,
and if you do use `soft`, ensure that you've taken steps to mitigate the risks.
If you experience behaviour that might have been caused by
If you experience behavior that might have been caused by
writes to disk on the NFS server not occurring, such as commits going missing,
use the `hard` option, because (from the man page):
......
......@@ -98,9 +98,9 @@ object storage back end, like when Git clients request large files via LFS or wh
downloading CI artifacts and logs.
When the files are stored on local block storage or NFS, GitLab has to act as a proxy.
This is not the default behaviour with object storage.
This is not the default behavior with object storage.
The `proxy_download` setting controls this behaviour: the default is generally `false`.
The `proxy_download` setting controls this behavior: the default is generally `false`.
Verify this in the documentation for each use case. Set it to `true` so that GitLab proxies
the files.
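
A hedged example for Omnibus installations (each object type has its own key; the LFS one is shown here as an assumption — check the documentation for your use case):

```ruby
# /etc/gitlab/gitlab.rb — illustrative sketch; each object type has its
# own proxy_download setting (LFS assumed here).
gitlab_rails['lfs_object_store_proxy_download'] = true
```

Run `sudo gitlab-ctl reconfigure` afterwards for the change to take effect.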
......
......@@ -427,7 +427,7 @@ NOTE: **Note:**
By default, users accessing a registry configured with a remote backend are redirected to the default backend for the storage driver. For example, registries can be configured using the `s3` storage driver, which redirects requests to a remote S3 bucket to alleviate load on the GitLab server.
However, this behaviour is undesirable for registries used by internal hosts that usually can't access public servers. To disable redirects, set the `disable` flag to true as follows. This makes all traffic to always go through the Registry service. This results in improved security (less surface attack as the storage backend is not publicly accessible), but worse performance (all traffic is redirected via the service).
However, this behavior is undesirable for registries used by internal hosts that usually can't access public servers. To disable redirects, set the `disable` flag to true as follows. This forces all traffic to go through the Registry service. This results in improved security (a smaller attack surface, as the storage backend is not publicly accessible), but worse performance (all traffic is redirected via the service).
**Omnibus GitLab installations**
......
......@@ -2607,7 +2607,7 @@ rspec:
> Introduced in GitLab 9.4.
The default behaviour of a caching job is to download the files at the start of
The default behavior of a caching job is to download the files at the start of
execution, and to re-upload them at the end. This allows any changes made by the
job to be persisted for future runs, and is known as the `pull-push` cache
policy.
......@@ -3198,7 +3198,7 @@ job1:
### `retry`
> - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/12909) in GitLab 9.5.
> - [Behaviour expanded](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/21758) in GitLab 11.5 to control on which failures to retry.
> - [Behavior expanded](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/21758) in GitLab 11.5 to control on which failures to retry.
`retry` allows you to configure how many times a job is going to be retried in
case of a failure.
......
......@@ -110,7 +110,7 @@ When exposing an `ID` field on a type, we will by default try to
expose a global ID by calling `to_global_id` on the resource being
rendered.
To override this behaviour, you can implement an `id` method on the
To override this behavior, you can implement an `id` method on the
type for which you are exposing an ID. Please make sure that, when
exposing a `GraphQL::ID_TYPE` using a custom method, it is
globally unique.
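
A minimal sketch of such an override (the type name and ID scheme here are illustrative, not an actual GitLab type):

```ruby
module Types
  class ExampleType < BaseObject
    graphql_name 'Example'

    field :id, GraphQL::ID_TYPE, null: false

    # Return a custom, globally unique ID instead of the default
    # `to_global_id`-based value.
    def id
      "gid://gitlab/Example/#{object.id}"
    end
  end
end
```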
......@@ -598,7 +598,7 @@ ID. Otherwise use a [globally unique ID](#exposing-global-ids).
We already have a `FullPathLoader` that can be included in other
resolvers to quickly find Projects and Namespaces which will have a
lot of dependant objects.
lot of dependent objects.
To limit the number of queries performed, we can use `BatchLoader`.
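
For illustration, a resolver method batching lookups with the batch-loader gem's `for`/`batch` API might look like this sketch (field and model names are assumptions):

```ruby
# Batches project lookups so that resolving N fields issues a single
# query instead of N queries (names are illustrative).
def project
  BatchLoader::GraphQL.for(object.project_id).batch do |project_ids, loader|
    Project.where(id: project_ids).each { |project| loader.call(project.id, project) }
  end
end
```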
......
......@@ -100,7 +100,7 @@ Model.create(foo: params[:foo])
## Using HTTP status helpers
For non-200 HTTP responses, use the provided helpers in `lib/api/helpers.rb` to ensure correct behaviour (`not_found!`, `no_content!` etc.). These will `throw` inside Grape and abort the execution of your endpoint.
For non-200 HTTP responses, use the provided helpers in `lib/api/helpers.rb` to ensure correct behavior (`not_found!`, `no_content!` etc.). These will `throw` inside Grape and abort the execution of your endpoint.
For `DELETE` requests, you should also generally use the `destroy_conditionally!` helper which by default returns a `204 No Content` response on success, or a `412 Precondition Failed` response if the given `If-Unmodified-Since` header is out of range. This helper calls `#destroy` on the passed resource, but you can also implement a custom deletion method by passing a block.
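
As a hedged sketch (the resource and finder here are illustrative, not a real endpoint):

```ruby
delete ':id' do
  issue = user_project.issues.find_by(iid: params[:id])
  not_found!('Issue') unless issue

  # Calls issue.destroy by default and returns 204 No Content, or
  # 412 Precondition Failed when If-Unmodified-Since is out of range.
  destroy_conditionally!(issue)
end
```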
......
......@@ -36,7 +36,7 @@ project.
To allow for asynchronous issue handling, we use [milestones](https://gitlab.com/groups/gitlab-org/-/milestones)
and [labels](https://gitlab.com/gitlab-org/gitlab/-/labels). Leads and product managers handle most of the
scheduling into milestones. Labelling is a task for everyone.
scheduling into milestones. Labeling is a task for everyone.
Most issues will have labels for at least one of the following:
......@@ -156,9 +156,9 @@ As a team needs some way to collect the work their members are planning to be as
Normally there is a 1:1 relationship between Stage labels and Group labels. In
the spirit of "Everyone can contribute", any issue can be picked up by any group,
depending on current priorities. When picking up an issue belonging to a different
group, it should be relabelled. For example, if an issue labelled `~"devops::create"`
group, it should be relabeled. For example, if an issue labeled `~"devops::create"`
and `~"group::knowledge"` is picked up by someone in the Access group of the Plan stage,
the issue should be relabelled as `~"group::access"` while keeping the original
the issue should be relabeled as `~"group::access"` while keeping the original
`~"devops::create"` unchanged.
We also use stage and group labels to help quantify our [throughput](https://about.gitlab.com/handbook/engineering/management/throughput/).
......@@ -347,7 +347,7 @@ there is the ~"stewardship" label.
This label is to be used for issues in which the stewardship of GitLab
is a topic of discussion. For instance if GitLab Inc. is planning to add
features from GitLab EE to GitLab CE, related issues would be labelled with
features from GitLab EE to GitLab CE, related issues would be labeled with
~"stewardship".
A recent example of this was the issue for
......@@ -461,7 +461,7 @@ fixing in the same MR, but not worth creating a follow-up issue for. Renaming a
method that is used in many places to make its intent slightly clearer may be
worth fixing, but it should not happen in the same MR, and is generally not
worth the overhead of having an issue of its own. These issues would invariably
be labelled `~P4 ~S4` if we were to create them.
be labeled `~P4 ~S4` if we were to create them.
More severe technical debt can have implications for development velocity. If
it isn't addressed in a timely manner, the codebase becomes needlessly difficult
......
......@@ -65,7 +65,7 @@ First, be aware of GitLab's [commitment to dogfooding](https://about.gitlab.com/
The code we write for Danger is GitLab-specific, and it **may not** be the most
appropriate place to implement functionality that addresses a need we encounter.
Our users, customers, and even our own satellite projects, such as [Gitaly](https://gitlab.com/gitlab-org/gitaly),
often face similar challenges, after all. Think about how you could fulfil the
often face similar challenges, after all. Think about how you could fulfill the
same need while ensuring everyone can benefit from the work, and do that instead
if you can.
......
......@@ -111,7 +111,7 @@ There are a few gotchas with it:
smaller methods. Or create a "hook" method that is empty in CE,
and with the EE-specific implementation in EE.
- when the original implementation contains a guard clause (e.g.
`return unless condition`), we cannot easily extend the behaviour by
`return unless condition`), we cannot easily extend the behavior by
overriding the method, because we can't know when the overridden method
(i.e. calling `super` in the overriding method) would want to stop early.
In this case, we shouldn't just override it, but update the original method
......@@ -131,7 +131,7 @@ There are a few gotchas with it:
```
Instead of just overriding `Base#execute`, we should update it and extract
the behaviour into another method:
the behavior into another method:
```ruby
class Base
......@@ -613,9 +613,9 @@ module EE
end
```
#### EE-specific behaviour
#### EE-specific behavior
Sometimes we need EE-specific behaviour in some of the APIs. Normally we could
Sometimes we need EE-specific behavior in some of the APIs. Normally we could
use EE methods to override CE methods, however API routes are not methods and
therefore can't be simply overridden. We need to extract them into a standalone
method, or introduce some "hooks" where we could inject behavior in the CE
......
......@@ -39,7 +39,7 @@ bundle exec rake routes | grep "issues"
### 2. `modal_copy_button` vs `clipboard_button`
The `clipboard_button` uses the `copy_to_clipboard.js` behaviour, which is
The `clipboard_button` uses the `copy_to_clipboard.js` behavior, which is
initialized on page load, so if there are vue-based clipboard buttons that
don't exist at page load (such as ones in a `GlModal`), they do not have the
click handlers associated with the clipboard package.
......
......@@ -192,7 +192,7 @@ if (isThingNull) {
## ESLint
ESLint behaviour can be found in our [tooling guide](../tooling.md).
ESLint behavior can be found in our [tooling guide](../tooling.md).
## IIFEs
......
......@@ -14,7 +14,7 @@ documentation:
> answers to queries that subsequently use them. For these reasons, hash index
> use is presently discouraged.
RuboCop is configured to register an offence when it detects the use of a hash
RuboCop is configured to register an offense when it detects the use of a hash
index.
Instead of using hash indexes, you should use regular btree indexes.
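
In a migration this simply means the default index type, as in this hedged sketch (table and column are illustrative):

```ruby
# A regular btree index (the default) instead of `using: :hash`.
class AddIndexOnUsersEmail < ActiveRecord::Migration[5.2]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  disable_ddl_transaction!

  def up
    add_concurrent_index :users, :email
  end

  def down
    remove_concurrent_index :users, :email
  end
end
```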
......@@ -24,7 +24,7 @@ The term `SHOULD` per the [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt) means
> carefully weighed before choosing a different course.
Ideally, each of these tradeoffs should be documented
in the separate issues, labelled accordingly and linked
in the separate issues, labeled accordingly and linked
to original issue and epic.
## Impact Analysis
......
......@@ -139,7 +139,7 @@ During this phase, the idea is to collect as much information as possible about
- **Requests**: Which requests are needed to have a working MVC. Ideally, produce
a list of all the requests needed for the MVC (including required actions). Further
investigation could provide an example for each request with the request and the response bodies.
- **Upload**: Carefully analyse how the upload process works. This will probably be the most
- **Upload**: Carefully analyze how the upload process works. This will probably be the most
complex request to implement. A detailed analysis is desired here as uploads can be
encoded in different ways (body or multipart) and can even be in a totally different
format (for example, a JSON structure where the package file is a Base64 value of
......
......@@ -109,7 +109,7 @@ In short:
By collecting snapshots of process state at regular intervals, profiling allows
you to see where time is spent in a process. The [StackProf](https://github.com/tmm1/stackprof)
gem is included in GitLab's development environment, allowing you to investigate
the behaviour of suspect code in detail.
the behavior of suspect code in detail.
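
A minimal sketch, assuming StackProf's standard block API (the profiled call is a placeholder):

```ruby
require 'stackprof'

# Sample the suspect code on the wall clock (interval in microseconds)
# and print a text report of where time was spent.
profile = StackProf.run(mode: :wall, interval: 1000) do
  SuspectClass.new.slow_method
end

StackProf::Report.new(profile).print_text
```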
It's important to note that profiling an application *alters its performance*,
and will generally be done *in an unrepresentative environment*. In particular,
......
......@@ -256,7 +256,7 @@ which `calculate_reactive_cache` can be called.
end
```
- The default behaviour can be overridden by defining a custom `reactive_cache_worker_finder`.
- The default behavior can be overridden by defining a custom `reactive_cache_worker_finder`.
```ruby
class Foo < ApplicationRecord
......
......@@ -78,7 +78,7 @@ The output of this example is `#<MatchData "bar">`, as Ruby treats the input `te
#### Impact
This Ruby Regex speciality can have security impact, as often regular expressions are used for validations or to impose restrictions on user-input.
This Ruby Regex specialty can have a security impact, as regular expressions are often used for validations or to impose restrictions on user input.
#### Examples
......
......@@ -337,7 +337,7 @@ Feature.enabled?(:ci_live_trace, project2) # => true
The code exercised by a single GitLab test may access and modify many items of
data. Without careful preparation before a test runs, and cleanup afterward,
data can be changed by a test in such a way that it affects the behaviour of
data can be changed by a test in such a way that it affects the behavior of
following tests. This should be avoided at all costs! Fortunately, the existing
test framework handles most cases already.
......
......@@ -540,7 +540,7 @@ and the basic idea is that the cost of a test includes:
### Frontend-related tests
There are cases where the behaviour you are testing is not worth the time spent
There are cases where the behavior you are testing is not worth the time spent
running the full application. For example, if you are testing styling, animation,
edge cases or small actions that don't involve the backend,
you should write an integration test using Jasmine.
......
......@@ -185,7 +185,7 @@ Currently supported parents:
The [original implementation](https://gitlab.com/gitlab-org/gitlab/issues/847) of value stream analytics defined 7 stages. These stages are always available for each parent, however altering these stages is not possible.
To make things efficient and reduce the number of records created, the default stages are expressed as in-memory objects (not persisted). When the user creates a custom stage for the first time, all the stages will be persisted. This behaviour is implemented in the value stream analytics service objects.
To make things efficient and reduce the number of records created, the default stages are expressed as in-memory objects (not persisted). When the user creates a custom stage for the first time, all the stages will be persisted. This behavior is implemented in the value stream analytics service objects.
The reason for this was that we'd like to add the ability to hide and order stages later on.
......
......@@ -2,7 +2,7 @@
Sometimes certain bits of code may only work on a certain database
version. While we try to avoid such code as much as possible, sometimes it is
necessary to add database (version) specific behaviour.
necessary to add database (version) specific behavior.
To facilitate this we have the following methods that you can use:
......
......@@ -237,19 +237,9 @@ On the Route 53 dashboard, click **Hosted zones** in the left navigation bar:
## PostgreSQL with RDS
For our database server we will use Amazon RDS which offers Multi AZ
for redundancy. Let's start by creating a subnet group and then we'll
for redundancy. First we'll create a security group and subnet group, then we'll
create the actual RDS instance.
### RDS Subnet Group
1. Navigate to the RDS dashboard and select **Subnet Groups** from the left menu.
1. Click on **Create DB Subnet Group**.
1. Under **Subnet group details**, enter a name (we'll use `gitlab-rds-group`), a description, and choose the `gitlab-vpc` from the VPC dropdown.
1. Under **Add subnets**, click **Add all the subnets related to this VPC** and remove the public ones, we only want the **private subnets**. In the end, you should see `10.0.1.0/24` and `10.0.3.0/24` (as we defined them in the [subnets section](#subnets)).
1. Click **Create** when ready.
![RDS Subnet Group](img/rds_subnet_group.png)
### RDS Security Group
We need a security group for our database that will allow inbound traffic from the instances we'll deploy in our `gitlab-loadbalancer-sec-group` later on:
......@@ -260,11 +250,21 @@ We need a security group for our database that will allow inbound traffic from t
1. In the **Inbound rules** section, click **Add rule** and add a **PostgreSQL** rule, and set the "Custom" source as the `gitlab-loadbalancer-sec-group` we created earlier. The default PostgreSQL port is `5432`, which we'll also use when creating our database below.
1. When done, click **Create security group**.
### RDS Subnet Group
1. Navigate to the RDS dashboard and select **Subnet Groups** from the left menu.
1. Click on **Create DB Subnet Group**.
1. Under **Subnet group details**, enter a name (we'll use `gitlab-rds-group`), a description, and choose the `gitlab-vpc` from the VPC dropdown.
1. Under **Add subnets**, click **Add all the subnets related to this VPC** and remove the public ones; we only want the **private subnets**. In the end, you should see `10.0.1.0/24` and `10.0.3.0/24` (as we defined them in the [subnets section](#subnets)).
1. Click **Create** when ready.
![RDS Subnet Group](img/rds_subnet_group.png)
### Create the database
Now, it's time to create the database:
1. Select **Databases** from the left menu and click **Create database**.
1. Navigate to the RDS dashboard, select **Databases** from the left menu, and click **Create database**.
1. Select **Standard Create** for the database creation method.
1. Select **PostgreSQL** as the database engine and select **PostgreSQL 10.9-R1** from the version dropdown menu (check the [database requirements](../../install/requirements.md#postgresql-requirements) to see if there are any updates on this for your chosen version of GitLab).
1. Since this is a production server, let's choose **Production** from the **Templates** section.
......@@ -296,15 +296,6 @@ Now that the database is created, let's move on to setting up Redis with ElastiC
ElastiCache is an in-memory hosted caching solution. Redis maintains its own
persistence and is used for certain types of data in the GitLab application.
### Redis Subnet Group
1. Navigate to the ElastiCache dashboard from your AWS console.
1. Go to **Subnet Groups** in the left menu, and create a new subnet group.
Make sure to select our VPC and its [private subnets](#subnets). Click
**Create** when ready.
![ElastiCache subnet](img/ec_subnet.png)
### Create a Redis Security Group
1. Navigate to the EC2 dashboard.
......@@ -314,6 +305,15 @@ persistence and is used for certain types of the GitLab application.
1. In the **Inbound rules** section, click **Add rule** and add a **Custom TCP** rule, set port `6379`, and set the "Custom" source as the `gitlab-loadbalancer-sec-group` we created earlier.
1. When done, click **Create security group**.
### Redis Subnet Group
1. Navigate to the ElastiCache dashboard from your AWS console.
1. Go to **Subnet Groups** in the left menu, and create a new subnet group (we'll name ours `gitlab-redis-group`).
Make sure to select our VPC and its [private subnets](#subnets). Click
**Create** when ready.
![ElastiCache subnet](img/ec_subnet.png)
### Create the Redis Cluster
1. Navigate back to the ElastiCache dashboard.
......
......@@ -217,7 +217,7 @@ If you push a LFS object to a project and you receive an error similar to:
the LFS client is trying to reach GitLab through HTTPS. However, your GitLab
instance is being served on HTTP.
This behaviour is caused by Git LFS using HTTPS connections by default when a
This behavior is caused by Git LFS using HTTPS connections by default when a
`lfsurl` is not set in the Git config.
To prevent this from happening, set the LFS URL in the project Git config:
......
......@@ -103,7 +103,7 @@ Our integrations add great value to GitLab. User questions often relate to integ
### Learn about the Support process
Zendesk is our Support Centre and our main communication line with our Customers. We communicate with customers through several other channels too
Zendesk is our Support Center and our main communication line with our Customers. We communicate with customers through several other channels too.
- Familiarize yourself with ZenDesk:
- [UI Overview](https://support.zendesk.com/hc/en-us/articles/203661806-Introduction-to-the-Zendesk-agent-interface)
......
......@@ -69,7 +69,7 @@ Each stage of Value Stream Analytics is further described in the table below.
| **Stage** | **Description** |
| --------- | --------------- |
| Issue | Measures the median time between creating an issue and taking action to solve it, by either labeling it or adding it to a milestone, whatever comes first. The label will be tracked only if it already has an [Issue Board list](../project/issue_board.md#creating-a-new-list) created for it. |
| Issue | Measures the median time between creating an issue and taking action to solve it, by either labeling it or adding it to a milestone, whatever comes first. The label will be tracked only if it already has an [Issue Board list](../project/issue_board.md) created for it. |
| Plan | Measures the median time between the action you took for the previous stage, and pushing the first commit to the branch. The very first commit of the branch is the one that triggers the separation between **Plan** and **Code**, and at least one of the commits in the branch needs to contain the related issue number (e.g., `#42`). If none of the commits in the branch mention the related issue number, it is not considered in the measurement time of the stage. |
| Code | Measures the median time between pushing a first commit (previous stage) and creating a merge request (MR) related to that commit. The key to keeping the process tracked is to include the [issue closing pattern](../project/issues/managing_issues.md#closing-issues-automatically) in the description of the merge request (for example, `Closes #xxx`, where `xxx` is the number of the issue related to this merge request). If the issue closing pattern is not present in the merge request description, the MR is not considered in the measurement time of the stage. |
| Test | Measures the median time to run the entire pipeline for that project. It's related to the time GitLab CI/CD takes to run every job for the commits pushed to that merge request defined in the previous stage. It is basically the start->finish time for all pipelines. |
......
......@@ -412,8 +412,8 @@ the report JSON unless stated otherwise. Presence of optional fields depends on
| `vulnerabilities[].message` | A short text that describes the vulnerability; it may include occurrence-specific information. Optional. |
| `vulnerabilities[].description` | A long text that describes the vulnerability. Optional. |
| `vulnerabilities[].cve` | (**DEPRECATED - use `vulnerabilities[].id` instead**) A fingerprint string value that represents a concrete occurrence of the vulnerability. It's used to determine whether two vulnerability occurrences are same or different. May not be 100% accurate. **This is NOT a [CVE](https://cve.mitre.org/)**. |
| `vulnerabilities[].severity` | How much the vulnerability impacts the software. Possible values: `Undefined` (an analyzer has not provided this information), `Info`, `Unknown`, `Low`, `Medium`, `High`, `Critical`. **Note:** Our current container scanning tool based on [klar](https://github.com/optiopay/klar) only provides the following levels: `Unknown`, `Low`, `Medium`, `High`, `Critical`. |
| `vulnerabilities[].confidence` | How reliable the vulnerability's assessment is. Possible values: `Undefined` (an analyzer has not provided this information), `Ignore`, `Unknown`, `Experimental`, `Low`, `Medium`, `High`, `Confirmed`. **Note:** Our current container scanning tool based on [klar](https://github.com/optiopay/klar) does not provide a confidence level, so this value is currently hardcoded to `Unknown`. |
| `vulnerabilities[].severity` | How much the vulnerability impacts the software. Possible values: `Info`, `Unknown`, `Low`, `Medium`, `High`, `Critical`. **Note:** Our current container scanning tool based on [klar](https://github.com/optiopay/klar) only provides the following levels: `Unknown`, `Low`, `Medium`, `High`, `Critical`. |
| `vulnerabilities[].confidence` | How reliable the vulnerability's assessment is. Possible values: `Ignore`, `Unknown`, `Experimental`, `Low`, `Medium`, `High`, `Confirmed`. **Note:** Our current container scanning tool based on [klar](https://github.com/optiopay/klar) does not provide a confidence level, so this value is currently hardcoded to `Unknown`. |
| `vulnerabilities[].solution` | Explanation of how to fix the vulnerability. Optional. |
| `vulnerabilities[].scanner` | A node that describes the analyzer used to find this vulnerability. |
| `vulnerabilities[].scanner.id` | Id of the scanner as a snake_case string. |
......
......@@ -384,8 +384,8 @@ the report JSON unless stated otherwise. Presence of optional fields depends on
| `vulnerabilities[].message` | A short text that describes the vulnerability; it may include occurrence-specific information. Optional. |
| `vulnerabilities[].description` | A long text that describes the vulnerability. Optional. |
| `vulnerabilities[].cve` | (**DEPRECATED - use `vulnerabilities[].id` instead**) A fingerprint string value that represents a concrete occurrence of the vulnerability. It's used to determine whether two vulnerability occurrences are same or different. May not be 100% accurate. **This is NOT a [CVE](https://cve.mitre.org/)**. |
| `vulnerabilities[].severity` | How much the vulnerability impacts the software. Possible values: `Undefined` (an analyzer has not provided this information), `Info`, `Unknown`, `Low`, `Medium`, `High`, `Critical`. |
| `vulnerabilities[].confidence` | How reliable the vulnerability's assessment is. Possible values: `Undefined` (an analyzer has not provided this information), `Ignore`, `Unknown`, `Experimental`, `Low`, `Medium`, `High`, `Confirmed`. |
| `vulnerabilities[].severity` | How much the vulnerability impacts the software. Possible values: `Info`, `Unknown`, `Low`, `Medium`, `High`, `Critical`. |
| `vulnerabilities[].confidence` | How reliable the vulnerability's assessment is. Possible values: `Ignore`, `Unknown`, `Experimental`, `Low`, `Medium`, `High`, `Confirmed`. |
| `vulnerabilities[].solution` | Explanation of how to fix the vulnerability. Optional. |
| `vulnerabilities[].scanner` | A node that describes the analyzer used to find this vulnerability. |
| `vulnerabilities[].scanner.id` | Id of the scanner as a snake_case string. |
......@@ -565,7 +565,7 @@ Add the following job section to `.gitlab-ci.yml`:
```yaml
gemnasium-python-dependency_scanning:
before_script:
- mkdir ~/.config/pip
- mkdir -p ~/.config/pip
- cp pip.conf ~/.config/pip/pip.conf
```
......
......@@ -456,8 +456,8 @@ the report JSON unless stated otherwise. Presence of optional fields depends on
| `vulnerabilities[].message` | A short text that describes the vulnerability; it may include occurrence-specific information. Optional. |
| `vulnerabilities[].description` | A long text that describes the vulnerability. Optional. |
| `vulnerabilities[].cve` | (**DEPRECATED - use `vulnerabilities[].id` instead**) A fingerprint string value that represents a concrete occurrence of the vulnerability. It's used to determine whether two vulnerability occurrences are same or different. May not be 100% accurate. **This is NOT a [CVE](https://cve.mitre.org/)**. |
| `vulnerabilities[].severity` | How much the vulnerability impacts the software. Possible values: `Undefined` (an analyzer has not provided this information), `Info`, `Unknown`, `Low`, `Medium`, `High`, `Critical`. |
| `vulnerabilities[].confidence` | How reliable the vulnerability's assessment is. Possible values: `Undefined` (an analyzer has not provided this information), `Ignore`, `Unknown`, `Experimental`, `Low`, `Medium`, `High`, `Confirmed`. |
| `vulnerabilities[].severity` | How much the vulnerability impacts the software. Possible values: `Info`, `Unknown`, `Low`, `Medium`, `High`, `Critical`. |
| `vulnerabilities[].confidence` | How reliable the vulnerability's assessment is. Possible values: `Ignore`, `Unknown`, `Experimental`, `Low`, `Medium`, `High`, `Confirmed`. |
| `vulnerabilities[].solution` | Explanation of how to fix the vulnerability. Optional. |
| `vulnerabilities[].scanner` | A node that describes the analyzer used to find this vulnerability. |
| `vulnerabilities[].scanner.id` | Id of the scanner as a snake_case string. |
......
......@@ -49,19 +49,30 @@ The following languages and package managers are supported.
| Language | Package managers | Scan Tool |
|------------|-------------------------------------------------------------------|----------------------------------------------------------|
| JavaScript | [Bower](https://bower.io/), [npm](https://www.npmjs.com/), [yarn](https://yarnpkg.com/) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Go | [Godep](https://github.com/tools/godep), go get ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)), gvt ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)), glide ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)), dep ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)), trash ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)) and govendor ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)), [go mod](https://github.com/golang/go/wiki/Modules) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| JavaScript | [Bower](https://bower.io/), [npm](https://www.npmjs.com/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Go | [Godep](https://github.com/tools/godep), [go mod](https://github.com/golang/go/wiki/Modules) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Java | [Gradle](https://gradle.org/), [Maven](https://maven.apache.org/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| .NET | [Nuget](https://www.nuget.org/) (.NET Framework is supported via the [mono project](https://www.mono-project.com/). Windows specific dependencies are not supported at this time.) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| .NET | [Nuget](https://www.nuget.org/) (.NET Framework is supported via the [mono project](https://www.mono-project.com/). Windows specific dependencies are not supported at this time.) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Python | [pip](https://pip.pypa.io/en/stable/) (Python is supported through [requirements.txt](https://pip.readthedocs.io/en/1.1/requirements.html) and [Pipfile.lock](https://github.com/pypa/pipfile#pipfilelock).) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Ruby | [gem](https://rubygems.org/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Erlang | [rebar](https://www.rebar3.org/) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types))|[License Finder](https://github.com/pivotal/LicenseFinder)|
| Objective-C, Swift | [Carthage](https://github.com/Carthage/Carthage) , [CocoaPods v0.39 and below](https://cocoapods.org/) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Elixir | [mix](https://elixir-lang.org/getting-started/mix-otp/introduction-to-mix.html) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types)) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| C++/C | [conan](https://conan.io/) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types))|[License Finder](https://github.com/pivotal/LicenseFinder)|
| Scala | [sbt](https://www.scala-sbt.org/) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types))|[License Finder](https://github.com/pivotal/LicenseFinder)|
| Rust | [cargo](https://crates.io/) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types))|[License Finder](https://github.com/pivotal/LicenseFinder)|
| PHP | [composer](https://getcomposer.org/) ([experimental support](https://github.com/pivotal/LicenseFinder#experimental-project-types))|[License Finder](https://github.com/pivotal/LicenseFinder)|
| Ruby | [gem](https://rubygems.org/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Objective-C, Swift | [Carthage](https://github.com/Carthage/Carthage) |[License Finder](https://github.com/pivotal/LicenseFinder)|
### Experimental support
The following languages and package managers are [supported experimentally](https://github.com/pivotal/LicenseFinder#experimental-project-types),
which means that the reported licenses might be incomplete or inaccurate.
| Language | Package managers | Scan Tool |
|------------|-------------------------------------------------------------------|----------------------------------------------------------|
| JavaScript | [yarn](https://yarnpkg.com/)|[License Finder](https://github.com/pivotal/LicenseFinder)|
| Go | go get, gvt, glide, dep, trash, govendor |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Erlang | [rebar](https://www.rebar3.org/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Objective-C, Swift | [CocoaPods](https://cocoapods.org/) v0.39 and below |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Elixir | [mix](https://elixir-lang.org/getting-started/mix-otp/introduction-to-mix.html) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| C++/C | [conan](https://conan.io/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Scala | [sbt](https://www.scala-sbt.org/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| Rust | [cargo](https://crates.io/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
| PHP | [composer](https://getcomposer.org/) |[License Finder](https://github.com/pivotal/LicenseFinder)|
## Requirements
......
......@@ -27,7 +27,7 @@ need to be configured in a Bamboo build plan before GitLab can integrate.
1. In the left pane, select a build stage. If you have multiple build stages,
you want to select the last stage that contains the Git checkout task.
1. Select the 'Miscellaneous' tab.
1. Under 'Pattern Match Labelling' put `${bamboo.repository.revision.number}`
1. Under 'Pattern Match Labeling' put `${bamboo.repository.revision.number}`
in the 'Labels' box.
1. Save
......
This diff is collapsed.
......@@ -164,7 +164,7 @@ Suppose you have the labels `workflow::development`, `workflow::review`, and
applied, and a developer wanted to advance the issue to `workflow::review`, they
would simply apply that label, and the `workflow::development` label would
automatically be removed. This behavior already exists when you move issues
across label lists in an [issue board](issue_board.md#creating-workflows), but
across label lists in an [issue board](issue_board.md#create-workflows), but
now, team members who may not be working in an issue board directly would still
be able to advance workflow states consistently in issues themselves.
......
......@@ -215,7 +215,7 @@ minutes.
![Repository Languages bar](img/repository_languages_v12_2.gif)
Not all files are detected, among others; documentation,
vendored code, and most markup languages are excluded. This behaviour can be
vendored code, and most markup languages are excluded. This behavior can be
adjusted by overriding the default. For example, to enable `.proto` files to be
detected, add the following to `.gitattributes` in the root of your repository.
......
# frozen_string_literal: true
module Gitlab
module CycleAnalytics
class GroupStageSummary
attr_reader :group, :current_user, :options
def initialize(group, options:)
@group = group
@current_user = options[:current_user]
@options = options
end
def data
[issue_stats,
deploy_stats,
deployment_frequency_stats]
end
private
def issue_stats
serialize(
Summary::Group::Issue.new(
group: group, current_user: current_user, options: options)
)
end
def deployments_summary
@deployments_summary ||=
Summary::Group::Deploy.new(group: group, options: options)
end
def deploy_stats
serialize deployments_summary
end
def deployment_frequency_stats
serialize(
Summary::Group::DeploymentFrequency.new(
deployments: deployments_summary.value,
group: group,
options: options),
with_unit: true
)
end
def serialize(summary_object, with_unit: false)
AnalyticsSummarySerializer.new.represent(
summary_object, with_unit: with_unit)
end
end
end
end
# frozen_string_literal: true
module Gitlab
module CycleAnalytics
module Summary
module Group
class Base
attr_reader :group, :options
def initialize(group:, options:)
@group = group
@options = options
end
def title
raise NotImplementedError.new("Expected #{self.name} to implement title")
end
def value
raise NotImplementedError.new("Expected #{self.name} to implement value")
end
end
end
end
end
end
# frozen_string_literal: true
module Gitlab
module CycleAnalytics
module Summary
module Group
class Deploy < Group::Base
include GroupProjectsProvider
def title
n_('Deploy', 'Deploys', value)
end
def value
@value ||= find_deployments
end
private
def find_deployments
deployments = Deployment.joins(:project).merge(Project.inside_path(group.full_path))
deployments = deployments.where(projects: { id: options[:projects] }) if options[:projects]
deployments = deployments.where("deployments.created_at > ?", options[:from])
deployments = deployments.where("deployments.created_at < ?", options[:to]) if options[:to]
deployments.success.count
end
end
end
end
end
end
# frozen_string_literal: true
module Gitlab
module CycleAnalytics
module Summary
module Group
class DeploymentFrequency < Group::Base
include GroupProjectsProvider
include SummaryHelper
def initialize(deployments:, group:, options:)
@deployments = deployments
super(group: group, options: options)
end
def title
_('Deployment Frequency')
end
def value
@value ||=
frequency(@deployments, options[:from], options[:to] || Time.now)
end
def unit
_('per day')
end
end
end
end
end
end
# frozen_string_literal: true
module Gitlab
module CycleAnalytics
module Summary
module Group
class Issue < Group::Base
attr_reader :group, :current_user, :options
def initialize(group:, current_user:, options:)
@group = group
@current_user = current_user
@options = options
end
def title
n_('New Issue', 'New Issues', value)
end
def value
@value ||= find_issues
end
private
def find_issues
issues = IssuesFinder.new(current_user, finder_params).execute
issues = issues.where(projects: { id: options[:projects] }) if options[:projects]
issues.count
end
def finder_params
{
group_id: group.id,
include_subgroups: true,
created_after: options[:from],
created_before: options[:to]
}.compact
end
end
end
end
end
end
......@@ -1063,8 +1063,6 @@ into similar problems in the future (e.g. when new tables are created).
# batch_size - The maximum number of rows per job
# other_arguments - Other arguments to send to the job
#
# *Returns the final migration delay*
#
# Example:
#
# class Route < ActiveRecord::Base
......@@ -1081,7 +1079,7 @@ into similar problems in the future (e.g. when new tables are created).
# # do something
# end
# end
def queue_background_migration_jobs_by_range_at_intervals(model_class, job_class_name, delay_interval, batch_size: BACKGROUND_MIGRATION_BATCH_SIZE, other_job_arguments: [], initial_delay: 0)
def queue_background_migration_jobs_by_range_at_intervals(model_class, job_class_name, delay_interval, batch_size: BACKGROUND_MIGRATION_BATCH_SIZE, other_arguments: [])
raise "#{model_class} does not have an ID to use for batch ranges" unless model_class.column_names.include?('id')
# To not overload the worker too much we enforce a minimum interval both
......@@ -1090,19 +1088,14 @@ into similar problems in the future (e.g. when new tables are created).
delay_interval = BackgroundMigrationWorker.minimum_interval
end
final_delay = 0
model_class.each_batch(of: batch_size) do |relation, index|
start_id, end_id = relation.pluck(Arel.sql('MIN(id), MAX(id)')).first
# `BackgroundMigrationWorker.bulk_perform_in` schedules all jobs for
# the same time, which is not helpful in most cases where we wish to
# spread the work over time.
final_delay = initial_delay + delay_interval * index
migrate_in(final_delay, job_class_name, [start_id, end_id] + other_job_arguments)
migrate_in(delay_interval * index, job_class_name, [start_id, end_id] + other_arguments)
end
final_delay
end
# Fetches indexes on a column by name for postgres.
......
......@@ -9,13 +9,6 @@ module Gitlab
module ControllerConcern
extend ActiveSupport::Concern
included do
# Tracking events from the template is not ideal and we are moving this to the client in https://gitlab.com/gitlab-org/gitlab/-/issues/213712
# In the meantime, using this method from the view is frowned upon and this line will likely be removed
# in the near future
helper_method :track_event
end
protected
def track_event(action = action_name, **args)
......
......@@ -20231,6 +20231,9 @@ msgstr ""
msgid "Telephone number"
msgstr ""
msgid "Telephone number (Optional)"
msgstr ""
msgid "Template"
msgstr ""
......
......@@ -200,7 +200,20 @@ describe Projects::WikisController do
subject
expect(response).to redirect_to(project_wiki_path(project, project_wiki.list_pages.first))
expect(response).to redirect_to_wiki(project, project_wiki.list_pages.first)
end
end
context 'when the page has nil content' do
let(:page) { create(:wiki_page) }
it 'redirects to show' do
allow(page).to receive(:content).and_return(nil)
allow(controller).to receive(:find_page).and_return(page)
subject
expect(response).to redirect_to_wiki(project, page)
end
end
......@@ -235,7 +248,7 @@ describe Projects::WikisController do
allow(controller).to receive(:valid_encoding?).and_return(false)
subject
expect(response).to redirect_to(project_wiki_path(project, project_wiki.list_pages.first))
expect(response).to redirect_to_wiki(project, project_wiki.list_pages.first)
end
end
......@@ -265,4 +278,8 @@ describe Projects::WikisController do
page = wiki.page(title: title, dir: dir)
project_wiki.delete_page(page, "test commit")
end
def redirect_to_wiki(project, page)
redirect_to(controller.project_wiki_path(project, page))
end
end
......@@ -4,6 +4,7 @@ import Tracking, { initUserTracking } from '~/tracking';
describe('Tracking', () => {
let snowplowSpy;
let bindDocumentSpy;
let trackLoadEventsSpy;
beforeEach(() => {
window.snowplow = window.snowplow || (() => {});
......@@ -18,6 +19,7 @@ describe('Tracking', () => {
describe('initUserTracking', () => {
beforeEach(() => {
bindDocumentSpy = jest.spyOn(Tracking, 'bindDocument').mockImplementation(() => null);
trackLoadEventsSpy = jest.spyOn(Tracking, 'trackLoadEvents').mockImplementation(() => null);
});
it('calls through to get a new tracker with the expected options', () => {
......@@ -58,6 +60,11 @@ describe('Tracking', () => {
initUserTracking();
expect(bindDocumentSpy).toHaveBeenCalled();
});
it('tracks page loaded events', () => {
initUserTracking();
expect(trackLoadEventsSpy).toHaveBeenCalled();
});
});
describe('.event', () => {
......@@ -127,6 +134,7 @@ describe('Tracking', () => {
<input type="checkbox" data-track-event="toggle_checkbox" value="_value_" checked/>
<input class="dropdown" data-track-event="toggle_dropdown"/>
<div data-track-event="nested_event"><span class="nested"></span></div>
<input data-track-eventbogus="click_bogusinput" data-track-label="_label_" value="_value_"/>
`);
});
......@@ -139,6 +147,12 @@ describe('Tracking', () => {
});
});
it('does not bind to clicks on elements without [data-track-event]', () => {
trigger('[data-track-eventbogus="click_bogusinput"]');
expect(eventSpy).not.toHaveBeenCalled();
});
it('allows value override with the data-track-value attribute', () => {
trigger('[data-track-event="click_input2"]');
......@@ -178,6 +192,44 @@ describe('Tracking', () => {
});
});
describe('tracking page load events', () => {
let eventSpy;
beforeEach(() => {
eventSpy = jest.spyOn(Tracking, 'event');
setHTMLFixture(`
<input data-track-event="render" data-track-label="label1" value="_value_" data-track-property="_property_"/>
<span data-track-event="render" data-track-label="label2" data-track-value="_value_">
Something
</span>
<input data-track-event="_render_bogus_" data-track-label="label3" value="_value_" data-track-property="_property_"/>
`);
Tracking.trackLoadEvents('_category_'); // only happens once
});
it('sends tracking events when [data-track-event="render"] is on an element', () => {
expect(eventSpy.mock.calls).toEqual([
[
'_category_',
'render',
{
label: 'label1',
value: '_value_',
property: '_property_',
},
],
[
'_category_',
'render',
{
label: 'label2',
value: '_value_',
},
],
]);
});
});
describe('tracking mixin', () => {
describe('trackingOptions', () => {
it('returns the options defined on initialisation', () => {
......
......@@ -82,4 +82,32 @@ describe CommitsHelper do
expect(helper.commit_to_html(commit, ref, project)).to include('<div class="commit-content')
end
end
describe 'commit_path' do
it 'returns a persisted merge request commit path' do
project = create(:project, :repository)
persisted_merge_request = create(:merge_request, source_project: project, target_project: project)
commit = project.repository.commit
expect(helper.commit_path(persisted_merge_request.project, commit, merge_request: persisted_merge_request))
.to eq(diffs_project_merge_request_path(project, persisted_merge_request, commit_id: commit.id))
end
it 'returns a non-persisted merge request commit path whose commits still reside in the source project' do
source_project = create(:project, :repository)
target_project = create(:project, :repository)
non_persisted_merge_request = build(:merge_request, source_project: source_project, target_project: target_project)
commit = source_project.repository.commit
expect(helper.commit_path(non_persisted_merge_request.project, commit, merge_request: non_persisted_merge_request))
.to eq(project_commit_path(source_project, commit))
end
it 'returns a project commit path' do
project = create(:project, :repository)
commit = project.repository.commit
expect(helper.commit_path(project, commit)).to eq(project_commit_path(project, commit))
end
end
end
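Taken together, the three examples fully determine the helper's branching. A hedged sketch consistent with them (assumed shape, not the real implementation):
# Persisted merge request: link to the commit in the MR's diff view.
# Non-persisted merge request: its commits still live in the source
# project, so link there. Otherwise: the plain project commit path.
def commit_path(project, commit, merge_request: nil)
  if merge_request&.persisted?
    diffs_project_merge_request_path(merge_request.target_project, merge_request, commit_id: commit.id)
  elsif merge_request
    project_commit_path(merge_request.source_project, commit)
  else
    project_commit_path(project, commit)
  end
end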
# frozen_string_literal: true
require 'spec_helper'
describe Gitlab::CycleAnalytics::GroupStageSummary do
let(:group) { create(:group) }
let(:project) { create(:project, :repository, namespace: group) }
let(:project_2) { create(:project, :repository, namespace: group) }
let(:from) { 1.day.ago }
let(:user) { create(:user, :admin) }
subject { described_class.new(group, options: { from: Time.now, current_user: user }).data }
describe "#new_issues" do
context 'with from date' do
before do
Timecop.freeze(5.days.ago) { create(:issue, project: project) }
Timecop.freeze(5.days.ago) { create(:issue, project: project_2) }
Timecop.freeze(5.days.from_now) { create(:issue, project: project) }
Timecop.freeze(5.days.from_now) { create(:issue, project: project_2) }
end
it "finds the number of issues created after it" do
expect(subject.first[:value]).to eq('2')
end
context 'with subgroups' do
before do
Timecop.freeze(5.days.from_now) { create(:issue, project: create(:project, namespace: create(:group, parent: group))) }
end
it "finds issues from them" do
expect(subject.first[:value]).to eq('3')
end
end
context 'with projects specified in options' do
before do
Timecop.freeze(5.days.from_now) { create(:issue, project: create(:project, namespace: group)) }
end
subject { described_class.new(group, options: { from: Time.now, current_user: user, projects: [project.id, project_2.id] }).data }
it 'finds issues from those projects' do
expect(subject.first[:value]).to eq('2')
end
end
context 'when `from` and `to` parameters are provided' do
subject { described_class.new(group, options: { from: 10.days.ago, to: Time.now, current_user: user }).data }
it 'finds issues from 5 days ago' do
expect(subject.first[:value]).to eq('2')
end
end
end
context 'with other projects' do
before do
Timecop.freeze(5.days.from_now) { create(:issue, project: create(:project, namespace: create(:group))) }
Timecop.freeze(5.days.from_now) { create(:issue, project: project) }
Timecop.freeze(5.days.from_now) { create(:issue, project: project_2) }
end
it "doesn't find issues from them" do
expect(subject.first[:value]).to eq('2')
end
end
end
describe "#deploys" do
context 'with from date' do
before do
Timecop.freeze(5.days.ago) { create(:deployment, :success, project: project) }
Timecop.freeze(5.days.from_now) { create(:deployment, :success, project: project) }
Timecop.freeze(5.days.ago) { create(:deployment, :success, project: project_2) }
Timecop.freeze(5.days.from_now) { create(:deployment, :success, project: project_2) }
end
it "finds the number of deploys made created after it" do
expect(subject.second[:value]).to eq('2')
end
context 'with subgroups' do
before do
Timecop.freeze(5.days.from_now) do
create(:deployment, :success, project: create(:project, :repository, namespace: create(:group, parent: group)))
end
end
it "finds deploys from them" do
expect(subject.second[:value]).to eq('3')
end
end
context 'with projects specified in options' do
before do
Timecop.freeze(5.days.from_now) do
create(:deployment, :success, project: create(:project, :repository, namespace: group, name: 'not_applicable'))
end
end
subject { described_class.new(group, options: { from: Time.now, current_user: user, projects: [project.id, project_2.id] }).data }
it 'shows deploys from those projects' do
expect(subject.second[:value]).to eq('2')
end
end
context 'when `from` and `to` parameters are provided' do
subject { described_class.new(group, options: { from: 10.days.ago, to: Time.now, current_user: user }).data }
it 'finds deployments from 5 days ago' do
expect(subject.second[:value]).to eq('2')
end
end
end
context 'with other projects' do
before do
Timecop.freeze(5.days.from_now) do
create(:deployment, :success, project: create(:project, :repository, namespace: create(:group)))
end
end
it "doesn't find deploys from them" do
expect(subject.second[:value]).to eq('-')
end
end
end
describe '#deployment_frequency' do
let(:from) { 6.days.ago }
let(:to) { nil }
subject do
described_class.new(group, options: {
from: from,
to: to,
current_user: user
}).data.third
end
it 'includes the unit: `per day`' do
expect(subject[:unit]).to eq(_('per day'))
end
before do
Timecop.freeze(5.days.ago) do
create(:deployment, :success, project: project)
end
end
context 'when `to` is nil' do
it 'includes range until now' do
# 1 deployment over 7 days
expect(subject[:value]).to eq('0.1')
end
end
context 'when `to` is given' do
let(:from) { 10.days.ago }
let(:to) { 10.days.from_now }
before do
Timecop.freeze(5.days.from_now) do
create(:deployment, :success, project: project)
end
end
it 'returns deployment frequency within `from` and `to` range' do
# 2 deployments over 20 days
expect(subject[:value]).to eq('0.1')
end
end
end
end
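Both '0.1' expectations in #deployment_frequency follow from the same arithmetic, deployments per day rounded to one decimal: 1 deployment over a 7-day window and 2 over a 20-day window each come out at 0.1. A hedged one-liner of that calculation (deployment_count and window_in_days are hypothetical names):
(deployment_count.to_f / window_in_days).round(1) # 1/7 => 0.1, 2/20 => 0.1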
......@@ -1365,22 +1365,6 @@ describe Gitlab::Database::MigrationHelpers do
end
end
it 'returns the final expected delay' do
Sidekiq::Testing.fake! do
final_delay = model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, batch_size: 2)
expect(final_delay.to_f).to eq(20.minutes.to_f)
end
end
it 'returns zero when nothing gets queued' do
Sidekiq::Testing.fake! do
final_delay = model.queue_background_migration_jobs_by_range_at_intervals(User.none, 'FooJob', 10.minutes)
expect(final_delay).to eq(0)
end
end
context 'with batch_size option' do
it 'queues jobs correctly' do
Sidekiq::Testing.fake! do
......@@ -1405,25 +1389,12 @@ describe Gitlab::Database::MigrationHelpers do
end
end
context 'with other_job_arguments option' do
it 'queues jobs correctly' do
Sidekiq::Testing.fake! do
model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, other_job_arguments: [1, 2])
expect(BackgroundMigrationWorker.jobs[0]['args']).to eq(['FooJob', [id1, id3, 1, 2]])
expect(BackgroundMigrationWorker.jobs[0]['at']).to eq(10.minutes.from_now.to_f)
end
end
end
context 'with initial_delay option' do
context 'with other_arguments option' do
it 'queues jobs correctly' do
Sidekiq::Testing.fake! do
model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, other_job_arguments: [1, 2], initial_delay: 10.minutes)
model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, other_arguments: [1, 2])
expect(BackgroundMigrationWorker.jobs[0]['args']).to eq(['FooJob', [id1, id3, 1, 2]])
expect(BackgroundMigrationWorker.jobs[0]['at']).to eq(20.minutes.from_now.to_f)
end
expect(BackgroundMigrationWorker.jobs[0]['args']).to eq(['FooJob', [id1, id3, 1, 2]])
expect(BackgroundMigrationWorker.jobs[0]['at']).to eq(10.minutes.from_now.to_f)
end
end
end
......
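A hedged call-site sketch mirroring the updated expectations above: the renamed other_arguments are appended after each batch's id range, and with no initial_delay the first job fires one interval from now.
# Enqueues FooJob with args [id1, id3, 1, 2], scheduled 10 minutes out.
queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, other_arguments: [1, 2])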
# frozen_string_literal: true
require 'spec_helper'
require Rails.root.join('db', 'post_migrate', '20200217210353_cleanup_optimistic_locking_nulls_pt2')
describe CleanupOptimisticLockingNullsPt2, :migration do
let(:ci_stages) { table(:ci_stages) }
let(:ci_builds) { table(:ci_builds) }
let(:ci_pipelines) { table(:ci_pipelines) }
let(:tables) { [ci_stages, ci_builds, ci_pipelines] }
before do
# Create necessary rows
ci_stages.create!
ci_builds.create!
ci_pipelines.create!
# Nullify `lock_version` column for all rows
# Needs to be done with a SQL fragment, otherwise Rails will coerce it to 0
tables.each do |table|
table.update_all('lock_version = NULL')
end
end
it 'correctly migrates nullified lock_version column', :sidekiq_might_not_need_inline do
tables.each do |table|
expect(table.where(lock_version: nil).count).to eq(1)
end
tables.each do |table|
expect(table.where(lock_version: 0).count).to eq(0)
end
migrate!
tables.each do |table|
expect(table.where(lock_version: nil).count).to eq(0)
end
tables.each do |table|
expect(table.where(lock_version: 0).count).to eq(1)
end
end
end
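The before block above also documents a Rails gotcha: update_all(lock_version: nil) would be coerced back to 0 by the optimistic-locking machinery, hence the raw SQL fragment. The migration under test performs the inverse; a hypothetical sketch of that step (assumed shape, not the real migration):
# Coerce historical NULL lock_version values to 0 so optimistic
# locking behaves consistently across the three CI tables.
%w[ci_stages ci_builds ci_pipelines].each do |table|
  execute("UPDATE #{table} SET lock_version = 0 WHERE lock_version IS NULL")
end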
# frozen_string_literal: true
require 'spec_helper'
describe CycleAnalytics::GroupLevel do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, :repository, namespace: group) }
let_it_be(:from_date) { 10.days.ago }
let_it_be(:user) { create(:user, :admin) }
let(:issue) { create(:issue, project: project, created_at: 2.days.ago) }
let_it_be(:milestone) { create(:milestone, project: project) }
let(:mr) { create_merge_request_closing_issue(user, project, issue, commit_message: "References #{issue.to_reference}") }
let(:pipeline) { create(:ci_empty_pipeline, status: 'created', project: project, ref: mr.source_branch, sha: mr.source_branch_sha, head_pipeline_of: mr) }
subject { described_class.new(group: group, options: { from: from_date, current_user: user }) }
describe '#permissions' do
it 'returns true for all stages' do
expect(subject.permissions.values.uniq).to eq([true])
end
end
describe '#stats' do
before do
create_cycle(user, project, issue, mr, milestone, pipeline)
deploy_master(user, project)
end
it 'returns medians for each stage for a specific group' do
expect(subject.no_stats?).to eq(false)
end
end
describe '#summary' do
before do
create_cycle(user, project, issue, mr, milestone, pipeline)
deploy_master(user, project)
end
it 'returns summary values for a specific group' do
expect(subject.summary.map { |summary| summary[:value] }).to contain_exactly('0.1', '1', '1')
end
end
end