提交 2ade98b6 编写于 作者: G GitLab Bot

Add latest changes from gitlab-org/gitlab@master

上级 908a54b6
......@@ -59,6 +59,9 @@ class Issue < ApplicationRecord
has_one :sentry_issue
has_one :alert_management_alert, class_name: 'AlertManagement::Alert'
has_and_belongs_to_many :self_managed_prometheus_alert_events, join_table: :issues_self_managed_prometheus_alert_events # rubocop: disable Rails/HasAndBelongsToMany
has_and_belongs_to_many :prometheus_alert_events, join_table: :issues_prometheus_alert_events # rubocop: disable Rails/HasAndBelongsToMany
has_many :prometheus_alerts, through: :prometheus_alert_events
accepts_nested_attributes_for :sentry_issue
......@@ -86,12 +89,14 @@ class Issue < ApplicationRecord
scope :preload_associated_models, -> { preload(:assignees, :labels, project: :namespace) }
scope :with_api_entity_associations, -> { preload(:timelogs, :assignees, :author, :notes, :labels, project: [:route, { namespace: :route }] ) }
scope :with_label_attributes, ->(label_attributes) { joins(:labels).where(labels: label_attributes) }
scope :with_alert_management_alerts, -> { joins(:alert_management_alert) }
scope :with_prometheus_alert_events, -> { joins(:issues_prometheus_alert_events) }
scope :with_self_managed_prometheus_alert_events, -> { joins(:issues_self_managed_prometheus_alert_events) }
scope :public_only, -> { where(confidential: false) }
scope :confidential_only, -> { where(confidential: true) }
scope :counts_by_state, -> { reorder(nil).group(:state_id).count }
scope :with_alert_management_alerts, -> { joins(:alert_management_alert) }
# An issue can be uniquely identified by project_id and iid
# Takes one or more sets of composite IDs, expressed as hash-like records of
......
---
title: Add count of alerts from all sources to usage ping
merge_request: 33220
author:
type: added
......@@ -124,9 +124,9 @@ regardless of the individual machine the scanner runs on.
Depending on the CI infrastructure,
the CI may have to fetch the Docker image every time the job runs.
To make the scanning job run fast, and to avoid wasting bandwidth,
it is important to make Docker images as small as possible,
ideally smaller than 50 MB.
For the scanning job to run fast and avoid wasting bandwidth, Docker images should be as small as
possible. You should aim for 50 MB or smaller. If that isn't possible, try to keep it below 1.46 GB
(for comparison, a CD-ROM holds roughly 700 MB; 1.46 GB is the capacity of a single-layer mini DVD).
If the scanner requires a fully functional Linux environment,
it is recommended to use a [Debian](https://www.debian.org/intro/about) "slim" distribution or [Alpine Linux](https://www.alpinelinux.org/).
......@@ -135,6 +135,22 @@ and to compile the scanner with all the libraries it needs.
[Multi-stage builds](https://docs.docker.com/develop/develop-images/multistage-build/)
might also help with keeping the image small.
To keep an image size small, consider using [dive](https://github.com/wagoodman/dive#dive) to analyze layers in a Docker image to
identify where additional bloat might be originating from.
In some cases, it might be difficult to remove files from an image. When this occurs, consider using
[Zstandard](https://github.com/facebook/zstd)
to compress files or large directories. Zstandard offers many different compression levels that can
decrease the size of your image with very little impact on decompression speed. It may be helpful to
automatically decompress any compressed directories as soon as an image launches. You can accomplish
this by adding a step to the Docker image's `/etc/bashrc` or to a specific user's `$HOME/.bashrc`.
Remember to change the entry point to launch a bash login shell if you chose the latter option.
Here are some examples to get you started:
- <https://gitlab.com/gitlab-org/security-products/license-management/-/blob/0b976fcffe0a9b8e80587adb076bcdf279c9331c/config/install.sh#L168-170>
- <https://gitlab.com/gitlab-org/security-products/license-management/-/blob/0b976fcffe0a9b8e80587adb076bcdf279c9331c/config/.bashrc#L49>
### Image tag
As documented in the [Docker Official Images](https://github.com/docker-library/official-images#tags-and-aliases) project,
......
......@@ -358,10 +358,10 @@ On GitLab.com, we have DangerBot setup to monitor Telemetry related files and Da
| `grafana_integrated_projects` | `counts` | | |
| `groups` | `counts` | | |
| `issues` | `counts` | | |
| `issues_created_from_gitlab_error_tracking_ui` | `counts` | | |
| `issues_with_associated_zoom_link` | `counts` | | |
| `issues_using_zoom_quick_actions` | `counts` | | |
| `issues_with_embedded_grafana_charts_approx` | `counts` | | |
| `issues_created_from_gitlab_error_tracking_ui` | `counts` | `monitor` | |
| `issues_with_associated_zoom_link` | `counts` | `monitor` | |
| `issues_using_zoom_quick_actions` | `counts` | `monitor` | |
| `issues_with_embedded_grafana_charts_approx` | `counts` | `monitor` | |
| `issues_with_health_status` | `counts` | | |
| `keys` | `counts` | | |
| `label_lists` | `counts` | | |
......@@ -373,7 +373,7 @@ On GitLab.com, we have DangerBot setup to monitor Telemetry related files and Da
| `projects` | `counts` | | |
| `projects_imported_from_github` | `counts` | | |
| `projects_with_repositories_enabled` | `counts` | | |
| `projects_with_error_tracking_enabled` | `counts` | | |
| `projects_with_error_tracking_enabled` | `counts` | `monitor` | |
| `protected_branches` | `counts` | | |
| `releases` | `counts` |`release` | Unique release tags |
| `remote_mirrors` | `counts` | | |
......@@ -462,6 +462,8 @@ On GitLab.com, we have DangerBot setup to monitor Telemetry related files and Da
| `alert_bot_incident_issues` | `counts` | `monitor` | Issues created by the alert bot |
| `incident_labeled_issues` | `counts` | `monitor` | Issues with the incident label |
| `issues_created_gitlab_alerts` | `counts` | `monitor` | Issues created from alerts by non-alert bot users |
| `issues_created_manually_from_alerts` | `counts` | `monitor` | Issues created from alerts by non-alert bot users |
| `issues_created_from_alerts` | `counts` | `monitor` | Issues created from Prometheus and alert management alerts |
| `ldap_group_links` | `counts` | | |
| `ldap_keys` | `counts` | | |
| `ldap_users` | `counts` | | |
......
......@@ -2,27 +2,18 @@
To enable the Microsoft Azure OAuth2 OmniAuth provider you must register your application with Azure. Azure will generate a client ID and secret key for you to use.
1. Sign in to the [Azure Portal](https://portal.azure.com).
Sign in to the [Azure Portal](https://portal.azure.com), and follow the instructions in
the [Microsoft Quickstart documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app).
1. Select "All Services" from the hamburger menu located top left and select "Azure Active Directory" or use the search bar at the top of the page to search for "Azure Active Directory".
1. You can select alternative directories by clicking the "switch tenant" button at the top of the Azure AD page.
As you go through the Microsoft procedure, keep the following in mind:
1. Select "App registrations" from the left hand menu, then select "New registration" from the top of the page.
1. Provide the required information and click the "Register" button.
- Name: 'GitLab' works just fine here.
- Supported account types: Select the appropriate choice based on the descriptions provided.
- Redirect URI: Enter the URL to the Azure OAuth callback of your GitLab installation (e.g. `https://gitlab.mycompany.com/users/auth/azure_oauth2/callback`), the type dropdown should be set to "Web".
1. On the "App Registration" page for the app you've created, select "Certificates & secrets" on the left.
- Create a new Client secret by clicking "New client secret" and selecting a duration. Provide a description if required to help identify the secret.
- Copy the secret and note it securely, this is shown when you click the "add" button. (You will not be able to retrieve the secret when you perform the next step or leave that blade in the Azure Portal.)
1. Select "Overview" in the left hand menu.
1. Note the "Application (client) ID" from the section at the top of the displayed page.
1. Note the "Directory (tenant) ID" from the section at the top of the page.
- If you have multiple instances of Azure Active Directory, you can switch to the desired tenant.
- You're setting up a Web application.
- For the redirect URI, you'll need the URL of the Azure OAuth callback of your GitLab installation (for example, `https://gitlab.mycompany.com/users/auth/azure_oauth2/callback`). The type dropdown should be set to "Web".
- The `client ID` and `client secret` are terms associated with OAuth 2. In some Microsoft documentation,
the terms may be listed as `Application ID` and `Application Secret`.
- If you need to generate a new client secret, follow the Microsoft documentation on how to [Create a new application secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-a-new-application-secret).
- Save the client ID and client secret for your new app. Once you leave the Azure portal, you won't be able to find the client secret again.
1. On your GitLab server, open the configuration file.
......@@ -78,4 +69,7 @@ To enable the Microsoft Azure OAuth2 OmniAuth provider you must register your ap
1. [Reconfigure](../administration/restart_gitlab.md#omnibus-gitlab-reconfigure) or [restart GitLab](../administration/restart_gitlab.md#installations-from-source) for the changes to take effect if you
installed GitLab via Omnibus or from source respectively.
On the sign in page there should now be a Microsoft icon below the regular sign in form. Click the icon to begin the authentication process. Microsoft will ask the user to sign in and authorize the GitLab application. If everything goes well the user will be returned to GitLab and will be signed in. See [Enable OmniAuth for an Existing User](omniauth.md#enable-omniauth-for-an-existing-user) for information on how existing GitLab users can connect their newly available Azure AD accounts to their existing GitLab users.
On the sign-in page, you should now see a Microsoft icon below the regular sign in form. Click the icon
to begin the authentication process. Microsoft then asks you to sign in and authorize the GitLab application. If everything goes well, you are returned to GitLab and signed in.
See [Enable OmniAuth for an Existing User](omniauth.md#enable-omniauth-for-an-existing-user)
for information on how existing GitLab users can connect to their newly-available Azure AD accounts.
......@@ -79,11 +79,7 @@ module Gitlab
end
def update_foreign_keys(from_table, to_table, from_column, to_column, cascade_delete = nil)
if transaction_open?
raise 'partitioned foreign key operations can not be run inside a transaction block, ' \
'you can disable transaction blocks by calling disable_ddl_transaction! ' \
'in the body of your migration class'
end
assert_not_in_transaction_block(scope: 'partitioned foreign key')
from_column ||= "#{to_table.to_s.singularize}_id"
specified_key = fk_from_spec(from_table, to_table, from_column, to_column, cascade_delete)
......@@ -103,7 +99,7 @@ module Gitlab
drop_function(fn_name, if_exists: true)
else
create_or_replace_fk_function(fn_name, final_keys)
create_function_trigger(trigger_name, fn_name, fires: "AFTER DELETE ON #{to_table}")
create_trigger(trigger_name, fn_name, fires: "AFTER DELETE ON #{to_table}")
end
end
end
......@@ -116,13 +112,6 @@ module Gitlab
end
end
# Runs the given block via Gitlab::Database::WithLockRetries, which manages
# lock acquisition/retry behavior (see that class for the exact policy).
# NOTE(review): this logs to the background-migration logger even though it
# is used from regular migration helpers — confirm that is intentional.
def with_lock_retries(&block)
Gitlab::Database::WithLockRetries.new({
klass: self.class,
logger: Gitlab::BackgroundMigration::Logger
}).run(&block)
end
# Returns the element of +keys+ that matches +key+ on both its source table
# and source column, or nil when no such key is present.
def find_existing_key(keys, key)
  keys.find do |candidate|
    candidate.from_table == key.from_table &&
      candidate.from_column == key.from_column
  end
end
......
......@@ -4,7 +4,10 @@ module Gitlab
module Database
module PartitioningMigrationHelpers
module TableManagementHelpers
include SchemaHelpers
include ::Gitlab::Database::SchemaHelpers
WHITELISTED_TABLES = %w[audit_events].freeze
ERROR_SCOPE = 'table partitioning'
# Creates a partitioned copy of an existing table, using a RANGE partitioning strategy on a timestamp column.
# One partition is created per month between the given `min_date` and `max_date`.
......@@ -20,6 +23,9 @@ module Gitlab
# :max_date - a date specifying the upper bounds of the partitioning range
#
def partition_table_by_date(table_name, column_name, min_date:, max_date:)
assert_table_is_whitelisted(table_name)
assert_not_in_transaction_block(scope: ERROR_SCOPE)
raise "max_date #{max_date} must be greater than min_date #{min_date}" if min_date >= max_date
primary_key = connection.primary_key(table_name)
......@@ -31,6 +37,7 @@ module Gitlab
new_table_name = partitioned_table_name(table_name)
create_range_partitioned_copy(new_table_name, table_name, partition_column, primary_key)
create_daterange_partitions(new_table_name, partition_column.name, min_date, max_date)
create_sync_trigger(table_name, new_table_name, primary_key)
end
# Clean up a partitioned copy of an existing table. This deletes the partitioned table and all partitions.
......@@ -40,40 +47,76 @@ module Gitlab
# drop_partitioned_table_for :audit_events
#
def drop_partitioned_table_for(table_name)
drop_table(partitioned_table_name(table_name))
assert_table_is_whitelisted(table_name)
assert_not_in_transaction_block(scope: ERROR_SCOPE)
with_lock_retries do
trigger_name = sync_trigger_name(table_name)
drop_trigger(table_name, trigger_name)
end
function_name = sync_function_name(table_name)
drop_function(function_name)
part_table_name = partitioned_table_name(table_name)
drop_table(part_table_name)
end
private
# Guard clause: the partitioning helpers may only be used against tables
# explicitly listed in WHITELISTED_TABLES. Raises a descriptive error for
# any other table so the migration fails fast.
def assert_table_is_whitelisted(table_name)
return if WHITELISTED_TABLES.include?(table_name.to_s)

raise "partitioning helpers are in active development, and #{table_name} is not whitelisted for use, " \
"for more information please contact the database team"
end
# Name of the partitioned copy of +table+; routed through tmp_table_name,
# which hashes the base name to keep the identifier short.
def partitioned_table_name(table)
tmp_table_name("#{table}_part")
end
# Derived name for the trigger function that syncs writes into the
# partitioned copy of +table+.
def sync_function_name(table)
object_name(table, 'table_sync_function')
end
# Derived name for the trigger that invokes the sync function for +table+.
def sync_trigger_name(table)
object_name(table, 'table_sync_trigger')
end
# Returns the ActiveRecord column definition object for +column+ on +table+,
# or nil when the table has no such column.
def find_column_definition(table, column)
  wanted = column.to_s
  connection.columns(table).find { |definition| definition.name == wanted }
end
def create_range_partitioned_copy(table_name, template_table_name, partition_column, primary_key)
tmp_column_name = object_name(partition_column.name, 'partition_key')
if table_exists?(table_name)
# rubocop:disable Gitlab/RailsLogger
Rails.logger.warn "Partitioned table not created because it already exists" \
" (this may be due to an aborted migration or similar): table_name: #{table_name} "
# rubocop:enable Gitlab/RailsLogger
return
end
execute(<<~SQL)
CREATE TABLE #{table_name} (
LIKE #{template_table_name} INCLUDING ALL EXCLUDING INDEXES,
#{tmp_column_name} #{partition_column.sql_type} NOT NULL,
PRIMARY KEY (#{[primary_key, tmp_column_name].join(", ")})
) PARTITION BY RANGE (#{tmp_column_name})
SQL
remove_column(table_name, partition_column.name)
rename_column(table_name, tmp_column_name, partition_column.name)
change_column_default(table_name, primary_key, nil)
tmp_column_name = object_name(partition_column.name, 'partition_key')
transaction do
execute(<<~SQL)
CREATE TABLE #{table_name} (
LIKE #{template_table_name} INCLUDING ALL EXCLUDING INDEXES,
#{tmp_column_name} #{partition_column.sql_type} NOT NULL,
PRIMARY KEY (#{[primary_key, tmp_column_name].join(", ")})
) PARTITION BY RANGE (#{tmp_column_name})
SQL
remove_column(table_name, partition_column.name)
rename_column(table_name, tmp_column_name, partition_column.name)
change_column_default(table_name, primary_key, nil)
end
end
def create_daterange_partitions(table_name, column_name, min_date, max_date)
min_date = min_date.beginning_of_month.to_date
max_date = max_date.next_month.beginning_of_month.to_date
create_range_partition("#{table_name}_000000", table_name, 'MINVALUE', to_sql_date_literal(min_date))
create_range_partition_safely("#{table_name}_000000", table_name, 'MINVALUE', to_sql_date_literal(min_date))
while min_date < max_date
partition_name = "#{table_name}_#{min_date.strftime('%Y%m')}"
......@@ -81,7 +124,7 @@ module Gitlab
lower_bound = to_sql_date_literal(min_date)
upper_bound = to_sql_date_literal(next_date)
create_range_partition(partition_name, table_name, lower_bound, upper_bound)
create_range_partition_safely(partition_name, table_name, lower_bound, upper_bound)
min_date = next_date
end
end
......@@ -90,11 +133,55 @@ module Gitlab
connection.quote(date.strftime('%Y-%m-%d'))
end
def create_range_partition(partition_name, table_name, lower_bound, upper_bound)
execute(<<~SQL)
CREATE TABLE #{partition_name} PARTITION OF #{table_name}
FOR VALUES FROM (#{lower_bound}) TO (#{upper_bound})
SQL
# Idempotent wrapper around create_range_partition: if the partition table
# already exists (e.g. left behind by an aborted migration) it only logs a
# warning and returns instead of failing the migration.
def create_range_partition_safely(partition_name, table_name, lower_bound, upper_bound)
if table_exists?(partition_name)
# rubocop:disable Gitlab/RailsLogger
Rails.logger.warn "Partition not created because it already exists" \
" (this may be due to an aborted migration or similar): partition_name: #{partition_name}"
# rubocop:enable Gitlab/RailsLogger
return
end

create_range_partition(partition_name, table_name, lower_bound, upper_bound)
end
# Installs the trigger machinery that mirrors INSERT/UPDATE/DELETE on
# +source_table+ into +target_table+, matching rows on +unique_key+.
# Function + comment + trigger creation run inside with_lock_retries, since
# CREATE TRIGGER needs a lock on the (potentially busy) source table.
def create_sync_trigger(source_table, target_table, unique_key)
function_name = sync_function_name(source_table)
trigger_name = sync_trigger_name(source_table)

with_lock_retries do
create_sync_function(function_name, target_table, unique_key)
create_comment('FUNCTION', function_name, "Partitioning migration: table sync for #{source_table} table")

create_trigger(trigger_name, function_name, fires: "AFTER INSERT OR UPDATE OR DELETE ON #{source_table}")
end
end
# Builds the trigger function body that replays DELETE/UPDATE/INSERT
# statements against +target_table+, keyed on +unique_key+. Column names
# come from the target table so the copy stays schema-accurate.
# replace: false — creation fails loudly if the function already exists.
# NOTE(review): the block parameter |name| in insert_values shadows the
# method's +name+ parameter; harmless here but worth renaming.
def create_sync_function(name, target_table, unique_key)
delimiter = ",\n    "
column_names = connection.columns(target_table).map(&:name)
set_statements = build_set_statements(column_names, unique_key)
insert_values = column_names.map { |name| "NEW.#{name}" }

create_trigger_function(name, replace: false) do
<<~SQL
IF (TG_OP = 'DELETE') THEN
DELETE FROM #{target_table} where #{unique_key} = OLD.#{unique_key};
ELSIF (TG_OP = 'UPDATE') THEN
UPDATE #{target_table}
SET #{set_statements.join(delimiter)}
WHERE #{target_table}.#{unique_key} = NEW.#{unique_key};
ELSIF (TG_OP = 'INSERT') THEN
INSERT INTO #{target_table} (#{column_names.join(delimiter)})
VALUES (#{insert_values.join(delimiter)});
END IF;
RETURN NULL;
SQL
end
end
# Builds the SQL SET fragments ("col = NEW.col") for every column except the
# unique key, for use in the sync function's UPDATE branch.
def build_set_statements(column_names, unique_key)
  column_names.each_with_object([]) do |column, statements|
    next if column == unique_key

    statements << "#{column} = NEW.#{column}"
  end
end
end
end
......
......@@ -16,12 +16,12 @@ module Gitlab
SQL
end
def create_function_trigger(name, fn_name, fires: nil)
def create_trigger(name, function_name, fires: nil)
execute(<<~SQL)
CREATE TRIGGER #{name}
#{fires}
FOR EACH ROW
EXECUTE PROCEDURE #{fn_name}()
EXECUTE PROCEDURE #{function_name}()
SQL
end
......@@ -35,6 +35,10 @@ module Gitlab
execute("DROP TRIGGER #{exists_clause} #{name} ON #{table_name}")
end
# Attaches a COMMENT to a database object; +type+ is the object kind
# (e.g. 'FUNCTION'), +name+ the identifier, +text+ the comment body.
# NOTE(review): +text+ is interpolated without escaping — callers must not
# pass text containing single quotes.
def create_comment(type, name, text)
  comment_statement = "COMMENT ON #{type} #{name} IS '#{text}'"
  execute(comment_statement)
end
def tmp_table_name(base)
hashed_base = Digest::SHA256.hexdigest(base).first(10)
......@@ -48,8 +52,30 @@ module Gitlab
"#{type}_#{hashed_identifier}"
end
# Runs the given block via Gitlab::Database::WithLockRetries, which manages
# lock acquisition/retry behavior (see that class for the exact policy).
# NOTE(review): uses the background-migration logger; confirm that is the
# intended log destination for foreground migrations as well.
def with_lock_retries(&block)
Gitlab::Database::WithLockRetries.new({
klass: self.class,
logger: Gitlab::BackgroundMigration::Logger
}).run(&block)
end
# Raises when called inside an open transaction; DDL performed by these
# helpers (e.g. partition/trigger management) must run outside a transaction
# block. +scope+ names the operation family for the error message.
def assert_not_in_transaction_block(scope:)
  if transaction_open?
    message = "#{scope} operations can not be run inside a transaction block, " \
      "you can disable transaction blocks by calling disable_ddl_transaction! " \
      "in the body of your migration class"
    raise message
  end
end
private
# Creates +partition_name+ as a range partition of +table_name+ covering
# [lower_bound, upper_bound). Bounds must already be SQL literals or the
# keywords MINVALUE/MAXVALUE (callers use to_sql_date_literal / 'MINVALUE').
def create_range_partition(partition_name, table_name, lower_bound, upper_bound)
execute(<<~SQL)
CREATE TABLE #{partition_name} PARTITION OF #{table_name}
FOR VALUES FROM (#{lower_bound}) TO (#{upper_bound})
SQL
end
# Returns +clause+ when +flag+ is truthy, otherwise an empty string, so SQL
# fragments can be interpolated unconditionally.
def optional_clause(flag, clause)
  return clause if flag

  ""
end
......
......@@ -69,6 +69,7 @@ module Gitlab
# rubocop: disable CodeReuse/ActiveRecord
def system_usage_data
alert_bot_incident_count = count(::Issue.authored(::User.alert_bot))
issues_created_manually_from_alerts = count(Issue.with_alert_management_alerts.not_authored_by(::User.alert_bot))
{
counts: {
......@@ -119,7 +120,9 @@ module Gitlab
issues_with_associated_zoom_link: count(ZoomMeeting.added_to_issue),
issues_using_zoom_quick_actions: distinct_count(ZoomMeeting, :issue_id),
issues_with_embedded_grafana_charts_approx: grafana_embed_usage_data,
issues_created_gitlab_alerts: count(Issue.with_alert_management_alerts.not_authored_by(::User.alert_bot)),
issues_created_from_alerts: total_alert_issues,
issues_created_gitlab_alerts: issues_created_manually_from_alerts,
issues_created_manually_from_alerts: issues_created_manually_from_alerts,
incident_issues: alert_bot_incident_count,
alert_bot_incident_issues: alert_bot_incident_count,
incident_labeled_issues: count(::Issue.with_label_attributes(IncidentManagement::CreateIssueService::INCIDENT_LABEL)),
......@@ -449,6 +452,16 @@ module Gitlab
private
# Total issues created from alerts across all sources: alert management
# alerts plus both legacy Prometheus alert-event tables.
def total_alert_issues
  # Remove prometheus table queries once they are deprecated
  # To be removed with https://gitlab.com/gitlab-org/gitlab/-/issues/217407.
  alert_issue_counts = [
    count(Issue.with_alert_management_alerts),
    count(::Issue.with_self_managed_prometheus_alert_events),
    count(::Issue.with_prometheus_alert_events)
  ]
  alert_issue_counts.sum
end
def user_minimum_id
strong_memoize(:user_minimum_id) do
::User.minimum(:id)
......
......@@ -58,6 +58,7 @@ FactoryBot.define do
# Alert Issues
create(:alert_management_alert, issue: issues[0], project: projects[0])
create(:alert_management_alert, issue: alert_bot_issues[0], project: projects[0])
create(:self_managed_prometheus_alert_event, related_issues: [issues[1]], project: projects[0])
# Enabled clusters
gcp_cluster = create(:cluster_provider_gcp, :created).cluster
......
......@@ -3,6 +3,8 @@
require 'spec_helper'
describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
include TriggerHelpers
let(:model) do
ActiveRecord::Migration.new.extend(described_class)
end
......@@ -27,7 +29,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
model.add_partitioned_foreign_key :issue_assignees, referenced_table
expect_function_to_contain(function_name, 'delete from issue_assignees where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -43,7 +45,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
expect_function_to_contain(function_name,
'delete from issue_assignees where issue_id = old.id',
'delete from epic_issues where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -59,7 +61,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
expect_function_to_contain(function_name,
'delete from issues where moved_to_id = old.id',
'delete from issues where duplicated_to_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -68,7 +70,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
model.add_partitioned_foreign_key :issues, referenced_table, column: :moved_to_id
expect_function_to_contain(function_name, 'delete from issues where moved_to_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
end
......@@ -79,7 +81,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
model.add_partitioned_foreign_key :issue_assignees, referenced_table, on_delete: :nullify
expect_function_to_contain(function_name, 'update issue_assignees set issue_id = null where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -88,7 +90,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
model.add_partitioned_foreign_key :issues, referenced_table, column: :duplicated_to_id
expect_function_to_contain(function_name, 'delete from issues where duplicated_to_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -99,7 +101,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
model.add_partitioned_foreign_key :user_preferences, referenced_table, column: :user_id, primary_key: :user_id
expect_function_to_contain(function_name, 'delete from user_preferences where user_id = old.user_id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -137,12 +139,12 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
expect_function_to_contain(function_name,
'delete from issue_assignees where issue_id = old.id',
'delete from epic_issues where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
model.remove_partitioned_foreign_key :issue_assignees, referenced_table
expect_function_to_contain(function_name, 'delete from epic_issues where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -153,12 +155,12 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
it 'removes the trigger function altogether' do
expect_function_to_contain(function_name, 'delete from issue_assignees where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
model.remove_partitioned_foreign_key :issue_assignees, referenced_table
expect(find_function_def(function_name)).to be_nil
expect(find_trigger_def(trigger_name)).to be_nil
expect_function_not_to_exist(function_name)
expect_trigger_not_to_exist(referenced_table, trigger_name)
end
end
......@@ -169,12 +171,12 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
it 'ignores the invalid key and properly recreates the trigger function' do
expect_function_to_contain(function_name, 'delete from issue_assignees where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
model.remove_partitioned_foreign_key :issues, referenced_table, column: :moved_to_id
expect_function_to_contain(function_name, 'delete from issue_assignees where issue_id = old.id')
expect_valid_function_trigger(trigger_name, function_name)
expect_valid_function_trigger(referenced_table, trigger_name, function_name, after: 'delete')
end
end
......@@ -188,45 +190,4 @@ describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers do
end
end
end
# Asserts that the trigger function +name+ consists of exactly the given
# body statements, followed by a trailing `return old`.
def expect_function_to_contain(name, *statements)
return_stmt, *body_stmts = parsed_function_statements(name).reverse

expect(return_stmt).to eq('return old')
expect(body_stmts).to contain_exactly(*statements)
end
# Asserts that trigger +name+ is an AFTER DELETE trigger whose action
# executes the function +fn_name+.
def expect_valid_function_trigger(name, fn_name)
event, activation, definition = cleaned_trigger_def(name)

expect(event).to eq('delete')
expect(activation).to eq('after')
expect(definition).to eq("execute procedure #{fn_name}()")
end
# Fetches the function source from pg_proc, normalizes whitespace/case,
# strips the BEGIN/END wrapper, and returns the individual statements.
def parsed_function_statements(name)
  cleaned_definition = find_function_def(name)['fn_body'].downcase.gsub(/\s+/, ' ')
  statements = cleaned_definition.sub(/\A\s*begin\s*(.*)\s*end\s*\Z/, "\\1")
  # Use non-bang map/compact: `compact!` returns nil when nothing is removed,
  # which made this method return nil whenever no blank fragments were split
  # off (e.g. when the body ends with ';' and split drops trailing empties).
  statements.split(';').map { |stmt| stmt.strip.presence }.compact
end
# Returns the pg_proc row for function +name+ as a hash with key 'fn_body'
# (the function source), or nil when the function does not exist.
def find_function_def(name)
connection.execute("select prosrc as fn_body from pg_proc where proname = '#{name}';").first
end
# Returns the trigger's [event, activation, definition] values, downcased
# for case-insensitive comparison in the expectations above.
def cleaned_trigger_def(name)
find_trigger_def(name).values_at('event', 'activation', 'definition').map!(&:downcase)
end
# Looks up trigger +name+ in information_schema.triggers, aggregating the
# per-event rows (one row per INSERT/UPDATE/DELETE) into a single record
# with a comma-joined 'event' column. Returns nil if the trigger is absent.
def find_trigger_def(name)
connection.execute(<<~SQL).first
select
string_agg(event_manipulation, ',') as event,
action_timing as activation,
action_statement as definition
from information_schema.triggers
where trigger_name = '#{name}'
group by 2, 3
SQL
end
end
......@@ -4,33 +4,63 @@ require 'spec_helper'
describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers do
include PartitioningHelpers
include TriggerHelpers
let(:model) do
let(:migration) do
ActiveRecord::Migration.new.extend(described_class)
end
let_it_be(:connection) { ActiveRecord::Base.connection }
let(:template_table) { :audit_events }
let(:partitioned_table) { '_test_migration_partitioned_table' }
let(:function_name) { '_test_migration_function_name' }
let(:trigger_name) { '_test_migration_trigger_name' }
let(:partition_column) { 'created_at' }
let(:min_date) { Date.new(2019, 12) }
let(:max_date) { Date.new(2020, 3) }
before do
allow(model).to receive(:puts)
allow(model).to receive(:partitioned_table_name).and_return(partitioned_table)
allow(migration).to receive(:puts)
allow(migration).to receive(:transaction_open?).and_return(false)
allow(migration).to receive(:partitioned_table_name).and_return(partitioned_table)
allow(migration).to receive(:sync_function_name).and_return(function_name)
allow(migration).to receive(:sync_trigger_name).and_return(trigger_name)
allow(migration).to receive(:assert_table_is_whitelisted)
end
describe '#partition_table_by_date' do
let(:partition_column) { 'created_at' }
let(:old_primary_key) { 'id' }
let(:new_primary_key) { [old_primary_key, partition_column] }
context 'when the table is not whitelisted' do
let(:template_table) { :this_table_is_not_whitelisted }
it 'raises an error' do
expect(migration).to receive(:assert_table_is_whitelisted).with(template_table).and_call_original
expect do
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/#{template_table} is not whitelisted for use/)
end
end
context 'when run inside a transaction block' do
it 'raises an error' do
expect(migration).to receive(:transaction_open?).and_return(true)
expect do
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/can not be run inside a transaction/)
end
end
context 'when the the max_date is less than the min_date' do
let(:max_date) { Time.utc(2019, 6) }
it 'raises an error' do
expect do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/max_date #{max_date} must be greater than min_date #{min_date}/)
end
end
......@@ -40,7 +70,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
it 'raises an error' do
expect do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/max_date #{max_date} must be greater than min_date #{min_date}/)
end
end
......@@ -50,13 +80,13 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
let(:partition_column) { :some_field }
it 'raises an error' do
model.create_table template_table, id: false do |t|
migration.create_table template_table, id: false do |t|
t.integer :id
t.datetime partition_column
end
expect do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/primary key not defined for #{template_table}/)
end
end
......@@ -66,14 +96,14 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
it 'raises an error' do
expect do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
end.to raise_error(/partition column #{partition_column} does not exist/)
end
end
context 'when a valid source table and partition column is given' do
describe 'constructing the partitioned table' do
it 'creates a table partitioned by the proper column' do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
expect(connection.table_exists?(partitioned_table)).to be(true)
expect(connection.primary_key(partitioned_table)).to eq(new_primary_key)
......@@ -82,7 +112,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
end
it 'removes the default from the primary key column' do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
pk_column = connection.columns(partitioned_table).find { |c| c.name == old_primary_key }
......@@ -90,7 +120,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
end
it 'creates the partitioned table with the same non-key columns' do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
copied_columns = filter_columns_by_name(connection.columns(partitioned_table), new_primary_key)
original_columns = filter_columns_by_name(connection.columns(template_table), new_primary_key)
......@@ -99,7 +129,7 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
end
it 'creates a partition spanning over each month in the range given' do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
expect_range_partition_of("#{partitioned_table}_000000", partitioned_table, 'MINVALUE', "'2019-12-01 00:00:00'")
expect_range_partition_of("#{partitioned_table}_201912", partitioned_table, "'2019-12-01 00:00:00'", "'2020-01-01 00:00:00'")
......@@ -107,6 +137,76 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
expect_range_partition_of("#{partitioned_table}_202002", partitioned_table, "'2020-02-01 00:00:00'", "'2020-03-01 00:00:00'")
end
end
# Verifies that partition_table_by_date installs a trigger on the source
# table that mirrors inserts/updates/deletes into the partitioned copy.
describe 'keeping data in sync with the partitioned table' do
let(:template_table) { :todos }
# Anonymous AR model pointed at the partitioned copy so we can read it back.
let(:model) { Class.new(ActiveRecord::Base) }
let(:timestamp) { Time.utc(2019, 12, 1, 12).round }
before do
model.primary_key = :id
model.table_name = partitioned_table
end
it 'creates a trigger function on the original table' do
# Neither the function nor the trigger should exist before the migration.
expect_function_not_to_exist(function_name)
expect_trigger_not_to_exist(template_table, trigger_name)
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
expect_function_to_exist(function_name)
expect_valid_function_trigger(template_table, trigger_name, function_name, after: %w[delete insert update])
end
it 'syncs inserts to the partitioned tables' do
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
expect(model.count).to eq(0)
first_todo = create(:todo, created_at: timestamp, updated_at: timestamp)
second_todo = create(:todo, created_at: timestamp, updated_at: timestamp)
# Rows created on the source table must appear, attribute-for-attribute,
# in the partitioned copy.
expect(model.count).to eq(2)
expect(model.find(first_todo.id).attributes).to eq(first_todo.attributes)
expect(model.find(second_todo.id).attributes).to eq(second_todo.attributes)
end
it 'syncs updates to the partitioned tables' do
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
first_todo = create(:todo, :pending, commit_id: nil, created_at: timestamp, updated_at: timestamp)
second_todo = create(:todo, created_at: timestamp, updated_at: timestamp)
expect(model.count).to eq(2)
first_copy = model.find(first_todo.id)
second_copy = model.find(second_todo.id)
expect(first_copy.attributes).to eq(first_todo.attributes)
expect(second_copy.attributes).to eq(second_todo.attributes)
first_todo.update(state_event: 'done', commit_id: 'abc123', updated_at: timestamp + 1.second)
# Only the updated row changes; the untouched row stays identical.
expect(model.count).to eq(2)
expect(first_copy.reload.attributes).to eq(first_todo.attributes)
expect(second_copy.reload.attributes).to eq(second_todo.attributes)
end
it 'syncs deletes to the partitioned tables' do
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
first_todo = create(:todo, created_at: timestamp, updated_at: timestamp)
second_todo = create(:todo, created_at: timestamp, updated_at: timestamp)
expect(model.count).to eq(2)
first_todo.destroy
# The deleted row disappears from the copy; the other row is untouched.
expect(model.count).to eq(1)
expect(model.find_by_id(first_todo.id)).to be_nil
expect(model.find(second_todo.id).attributes).to eq(second_todo.attributes)
end
end
end
describe '#drop_partitioned_table_for' do
......@@ -114,14 +214,38 @@ describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHelpers
%w[000000 201912 202001 202002].map { |suffix| "#{partitioned_table}_#{suffix}" }.unshift(partitioned_table)
end
# Dropping the partitioned copy must refuse non-whitelisted tables.
context 'when the table is not whitelisted' do
let(:template_table) { :this_table_is_not_whitelisted }
it 'raises an error' do
expect(migration).to receive(:assert_table_is_whitelisted).with(template_table).and_call_original
expect do
migration.drop_partitioned_table_for template_table
end.to raise_error(/#{template_table} is not whitelisted for use/)
end
end
it 'drops the trigger syncing to the partitioned table' do
# Set up the partitioned copy first so the trigger/function exist.
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
expect_function_to_exist(function_name)
expect_valid_function_trigger(template_table, trigger_name, function_name, after: %w[delete insert update])
migration.drop_partitioned_table_for template_table
# Both the trigger and its backing function must be removed.
expect_function_not_to_exist(function_name)
expect_trigger_not_to_exist(template_table, trigger_name)
end
it 'drops the partitioned copy and all partitions' do
model.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
migration.partition_table_by_date template_table, partition_column, min_date: min_date, max_date: max_date
expected_tables.each do |table|
expect(connection.table_exists?(table)).to be(true)
end
model.drop_partitioned_table_for template_table
migration.drop_partitioned_table_for template_table
expected_tables.each do |table|
expect(connection.table_exists?(table)).to be(false)
......
......@@ -76,6 +76,8 @@ describe Gitlab::UsageData, :aggregate_failures do
expect(count_data[:issues_with_embedded_grafana_charts_approx]).to eq(2)
expect(count_data[:incident_issues]).to eq(4)
expect(count_data[:issues_created_gitlab_alerts]).to eq(1)
expect(count_data[:issues_created_from_alerts]).to eq(3)
expect(count_data[:issues_created_manually_from_alerts]).to eq(1)
expect(count_data[:alert_bot_incident_issues]).to eq(4)
expect(count_data[:incident_labeled_issues]).to eq(3)
......
......@@ -21,6 +21,9 @@ describe Issue do
it { is_expected.to have_one(:alert_management_alert) }
it { is_expected.to have_many(:resource_milestone_events) }
it { is_expected.to have_many(:resource_state_events) }
it { is_expected.to have_and_belong_to_many(:prometheus_alert_events) }
it { is_expected.to have_and_belong_to_many(:self_managed_prometheus_alert_events) }
it { is_expected.to have_many(:prometheus_alerts) }
describe 'versions.most_recent' do
it 'returns the most recent version' do
......
# frozen_string_literal: true

# RSpec helpers for asserting on PostgreSQL trigger functions and triggers.
# The including context must provide a `connection` (an ActiveRecord
# database connection) — assumed from the including specs; TODO confirm.
module TriggerHelpers
  def expect_function_to_exist(name)
    expect(find_function_def(name)).not_to be_nil
  end

  def expect_function_not_to_exist(name)
    expect(find_function_def(name)).to be_nil
  end

  # Asserts that the trigger function `name` ends with `RETURN OLD` and
  # that its remaining statements are exactly `statements`
  # (order-insensitive, lowercased, whitespace-collapsed).
  def expect_function_to_contain(name, *statements)
    return_stmt, *body_stmts = parsed_function_statements(name).reverse

    expect(return_stmt).to eq('return old')
    expect(body_stmts).to contain_exactly(*statements)
  end

  def expect_trigger_not_to_exist(table_name, name)
    expect(find_trigger_def(table_name, name)).to be_nil
  end

  # Asserts that trigger `name` on `table_name` executes `fn_name` with the
  # given timing/events, e.g. `after: %w[delete insert update]`.
  def expect_valid_function_trigger(table_name, name, fn_name, fires_on)
    events, timing, definition = cleaned_trigger_def(table_name, name)

    events = events&.split(',')
    expected_timing, expected_events = fires_on.first

    expect(timing).to eq(expected_timing.to_s)
    # Kernel#Array is the stdlib equivalent of Array.wrap for the
    # string/array inputs used here (nil => [], scalar => [scalar]).
    expect(events).to match_array(Array(expected_events))
    expect(definition).to eq("execute procedure #{fn_name}()")
  end

  private

  # Returns the function body as an array of lowercased SQL statements,
  # with the surrounding BEGIN/END stripped and blank fragments removed.
  def parsed_function_statements(name)
    cleaned_definition = find_function_def(name)['body'].downcase.gsub(/\s+/, ' ')
    statements = cleaned_definition.sub(/\A\s*begin\s*(.*)\s*end\s*\Z/, "\\1")

    # Avoid `map!…compact!` here: `compact!` returns nil when there is
    # nothing to remove, which would make this helper return nil.
    statements.split(';').map(&:strip).reject(&:empty?)
  end

  # Looks up the source body of function `name` in pg_proc. `name` comes
  # from the specs themselves, so direct interpolation is acceptable in
  # this test-only helper. Returns nil when the function does not exist.
  def find_function_def(name)
    connection.select_one(<<~SQL)
      SELECT prosrc AS body
      FROM pg_proc
      WHERE proname = '#{name}'
    SQL
  end

  # Returns the trigger's [event, action_timing, action_statement]
  # values, downcased for case-insensitive comparison.
  def cleaned_trigger_def(table_name, name)
    find_trigger_def(table_name, name).values_at('event', 'action_timing', 'action_statement').map!(&:downcase)
  end

  # Looks up trigger `name` on `table_name` in information_schema.triggers,
  # aggregating the fired events into one comma-joined column. Returns nil
  # when no such trigger exists.
  def find_trigger_def(table_name, name)
    connection.select_one(<<~SQL)
      SELECT
        string_agg(event_manipulation, ',') AS event,
        action_timing,
        action_statement
      FROM information_schema.triggers
      WHERE event_object_table = '#{table_name}'
        AND trigger_name = '#{name}'
      GROUP BY 2, 3
    SQL
  end
end
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册