diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6ab38a4ddb1..6264ff9611d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,6 +1,7 @@ bootstrap.sh @frouioui -go.mod @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui -go.sum @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui +go.mod @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui @timvaillancourt +go.sum @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui @timvaillancourt +/.golangci.yml @timvaillancourt /.github/ @mattlord @rohit-nayak-ps @frouioui /.github/ISSUE_TEMPLATE/ @frouioui @mattlord /.github/workflows/ @frouioui @mattlord @rohit-nayak-ps @@ -23,7 +24,7 @@ go.sum @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui /go/internal/flag @rohit-nayak-ps /go/mysql @harshit-gangal @systay @mattlord /go/pools @harshit-gangal -/go/protoutil @mattlord +/go/protoutil @mattlord @timvaillancourt /go/sqltypes @harshit-gangal @shlomi-noach /go/test/endtoend/onlineddl @rohit-nayak-ps @shlomi-noach /go/test/endtoend/messaging @mattlord @rohit-nayak-ps @derekperkins @@ -31,45 +32,45 @@ go.sum @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui /go/test/endtoend/transaction @harshit-gangal @systay @frouioui /go/test/endtoend/*throttler* @shlomi-noach @mattlord @timvaillancourt /go/test/endtoend/vtgate @harshit-gangal @systay @frouioui -/go/test/endtoend/vtorc @shlomi-noach @timvaillancourt +/go/test/endtoend/vtorc @mattlord @shlomi-noach @timvaillancourt /go/tools/ @frouioui @systay /go/vt/dbconnpool @harshit-gangal @mattlord -/go/vt/discovery @frouioui -/go/vt/discovery/*tablet_picker* @rohit-nayak-ps @mattlord +/go/vt/discovery @frouioui @timvaillancourt +/go/vt/discovery/*tablet_picker* @rohit-nayak-ps @mattlord @timvaillancourt /go/vt/mysqlctl @mattlord @frouioui /go/vt/proto @harshit-gangal @mattlord /go/vt/proto/vtadmin @beingnoble03 /go/vt/schema @mattlord @shlomi-noach -/go/vt/servenv @dbussink +/go/vt/servenv @dbussink @timvaillancourt 
/go/vt/schemadiff @shlomi-noach @mattlord /go/vt/sqlparser @harshit-gangal @systay -/go/vt/srvtopo @mattlord +/go/vt/srvtopo @mattlord @timvaillancourt /go/vt/sysvars @harshit-gangal @systay -/go/vt/topo @mattlord -/go/vt/topotools @mattlord +/go/vt/topo @mattlord @timvaillancourt +/go/vt/topotools @mattlord @timvaillancourt /go/vt/vitessdriver @harshit-gangal /go/vt/vtadmin @beingnoble03 @rohit-nayak-ps /go/vt/vtctl @rohit-nayak-ps /go/vt/vtctl/vtctl.go @rohit-nayak-ps -/go/vt/vtctl/grpcvtctldclient @mattlord -/go/vt/vtctl/grpcvtctldserver @mattlord -/go/vt/vtctl/reparentutil -/go/vt/vtctl/vtctldclient @mattlord -/go/vt/vtctld @rohit-nayak-ps @mattlord -/go/vt/vterrors @harshit-gangal @systay @frouioui +/go/vt/vtctl/grpcvtctldclient @mattlord @timvaillancourt +/go/vt/vtctl/grpcvtctldserver @mattlord @timvaillancourt +/go/vt/vtctl/reparentutil @timvaillancourt +/go/vt/vtctl/vtctldclient @mattlord @timvaillancourt +/go/vt/vtctld @rohit-nayak-ps @mattlord @timvaillancourt +/go/vt/vterrors @harshit-gangal @systay @frouioui @timvaillancourt /go/vt/vtexplain @systay @harshit-gangal /go/vt/vtgate @harshit-gangal @systay @frouioui /go/vt/vtgate/endtoend/*vstream* @rohit-nayak-ps @mattlord @shlomi-noach @beingnoble03 /go/vt/vtgate/planbuilder @harshit-gangal @systay @frouioui @arthurschreiber /go/vt/vtgate/*vstream* @rohit-nayak-ps @mattlord @shlomi-noach @beingnoble03 /go/vt/vtgate/evalengine @dbussink @systay -/go/vt/vtorc @shlomi-noach @timvaillancourt +/go/vt/vtorc @mattlord @shlomi-noach @timvaillancourt /go/vt/vttablet/*conn* @harshit-gangal @systay /go/vt/vttablet/endtoend @harshit-gangal @mattlord @rohit-nayak-ps @systay -/go/vt/vttablet/grpc* @rohit-nayak-ps @shlomi-noach @harshit-gangal +/go/vt/vttablet/grpc* @rohit-nayak-ps @shlomi-noach @harshit-gangal @timvaillancourt /go/vt/vttablet/onlineddl @mattlord @rohit-nayak-ps @shlomi-noach /go/vt/vttablet/queryservice @harshit-gangal @systay -/go/vt/vttablet/tabletmanager @rohit-nayak-ps @shlomi-noach 
+/go/vt/vttablet/tabletmanager @rohit-nayak-ps @shlomi-noach @timvaillancourt /go/vt/vttablet/tabletmanager/rpc_backup.go @rohit-nayak-ps @shlomi-noach @frouioui /go/vt/vttablet/tabletmanager/rpc_throttler.go @shlomi-noach @mattlord @timvaillancourt /go/vt/vttablet/tabletserver/throttle @shlomi-noach @mattlord @timvaillancourt @@ -78,7 +79,7 @@ go.sum @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui /go/vt/vttablet/tabletmanager/vstreamer @rohit-nayak-ps @mattlord @shlomi-noach @beingnoble03 /go/vt/vttablet/tabletserver* @harshit-gangal @systay @shlomi-noach @rohit-nayak-ps @timvaillancourt /go/vt/vttablet/tabletserver/messager @mattlord @rohit-nayak-ps @derekperkins -/go/vt/vttablet/*tmclient* @rohit-nayak-ps @shlomi-noach +/go/vt/vttablet/*tmclient* @rohit-nayak-ps @shlomi-noach @timvaillancourt /go/vt/vttablet/vexec @mattlord @rohit-nayak-ps @shlomi-noach /go/vt/wrangler @mattlord @rohit-nayak-ps /go/vt/vtctl/workflow @mattlord @rohit-nayak-ps @shlomi-noach @beingnoble03 @@ -86,6 +87,7 @@ go.sum @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui /proto/vtadmin.proto @beingnoble03 @mattlord /proto/vtctldata.proto @mattlord /proto/vtctlservice.proto @mattlord +/proto/vtorcdata.proto @mattlord @shlomi-noach @timvaillancourt /test/ @frouioui @rohit-nayak-ps @mattlord @harshit-gangal /tools/ @frouioui @rohit-nayak-ps /web/vtadmin @beingnoble03 diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml new file mode 100644 index 00000000000..c7264d404ab --- /dev/null +++ b/.github/workflows/backport.yml @@ -0,0 +1,594 @@ +name: Backport and Forwardport PRs + +on: + # pull_request: + # types: [closed] + workflow_dispatch: + inputs: + pr_number: + description: 'Pull Request number to backport/forwardport' + required: true + type: number + dry_run: + description: 'Dry run mode - show what would happen without making changes' + required: false + type: boolean + default: true + +permissions: + contents: write + pull-requests: write + 
issues: write + +jobs: + backport: + name: Backport/Forwardport PR + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Git + run: | + git config --global user.name "vitess-bot[bot]" + git config --global user.email "108069721+vitess-bot[bot]@users.noreply.github.com" + + - name: Get PR information + id: pr-info + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Determine PR number and dry-run mode based on trigger type + if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + PR_NUMBER="${{ inputs.pr_number }}" + DRY_RUN="${{ inputs.dry_run }}" + else + PR_NUMBER="${{ github.event.pull_request.number }}" + DRY_RUN="false" + fi + + echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT + echo "dry_run=$DRY_RUN" >> $GITHUB_OUTPUT + + if [ "$DRY_RUN" = "true" ]; then + echo "🔍 DRY RUN MODE ENABLED - No changes will be made" + echo "==================================================" + fi + + # Fetch PR details from API + PR_DATA=$(gh pr view "$PR_NUMBER" --json number,title,author,mergeCommit,merged,state,repository) + + # Extract PR details + PR_TITLE=$(echo "$PR_DATA" | jq -r '.title') + PR_AUTHOR=$(echo "$PR_DATA" | jq -r '.author.login') + MERGE_COMMIT_SHA=$(echo "$PR_DATA" | jq -r '.mergeCommit.oid') + IS_MERGED=$(echo "$PR_DATA" | jq -r '.merged') + REPO_OWNER=$(echo "$PR_DATA" | jq -r '.repository.owner.login') + REPO_NAME=$(echo "$PR_DATA" | jq -r '.repository.name') + + # Validate PR is merged + if [ "$IS_MERGED" != "true" ]; then + echo "Error: PR #$PR_NUMBER is not merged. Cannot backport unmerged PRs." + exit 1 + fi + + # Validate repository (only vitess) + if [ "$REPO_NAME" != "vitess" ]; then + echo "Error: PR #$PR_NUMBER is from repository $REPO_OWNER/$REPO_NAME, not vitess. This workflow only supports vitessio/vitess." 
+ exit 1 + fi + + # Export PR information + echo "pr_title=$PR_TITLE" >> $GITHUB_OUTPUT + echo "pr_author=$PR_AUTHOR" >> $GITHUB_OUTPUT + echo "merge_commit_sha=$MERGE_COMMIT_SHA" >> $GITHUB_OUTPUT + echo "repo_owner=$REPO_OWNER" >> $GITHUB_OUTPUT + echo "repo_name=$REPO_NAME" >> $GITHUB_OUTPUT + + echo "Processing PR #$PR_NUMBER: $PR_TITLE" + echo "Author: $PR_AUTHOR" + echo "Merge commit: $MERGE_COMMIT_SHA" + + - name: Extract backport and forwardport labels + id: extract-labels + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_NUMBER="${{ steps.pr-info.outputs.pr_number }}" + + # Get all labels from the PR + LABELS=$(gh pr view "$PR_NUMBER" --json labels --jq '.labels[].name') + + # Extract backport branches + BACKPORT_BRANCHES=$(echo "$LABELS" | grep "^Backport to: " | sed 's/^Backport to: //' || true) + FORWARDPORT_BRANCHES=$(echo "$LABELS" | grep "^Forwardport to: " | sed 's/^Forwardport to: //' || true) + + # Extract other labels (excluding backport/forwardport labels) + OTHER_LABELS=$(echo "$LABELS" | grep -v "^Backport to: " | grep -v "^Forwardport to: " | jq -R -s -c 'split("\n") | map(select(length > 0))' || echo '[]') + + # Export as outputs + echo "backport_branches<<EOF" >> $GITHUB_OUTPUT + echo "$BACKPORT_BRANCHES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + echo "forwardport_branches<<EOF" >> $GITHUB_OUTPUT + echo "$FORWARDPORT_BRANCHES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + echo "other_labels=$OTHER_LABELS" >> $GITHUB_OUTPUT + + if [ -n "$BACKPORT_BRANCHES" ]; then + echo "Will backport PR #$PR_NUMBER to branches: $BACKPORT_BRANCHES" + fi + if [ -n "$FORWARDPORT_BRANCHES" ]; then + echo "Will forwardport PR #$PR_NUMBER to branches: $FORWARDPORT_BRANCHES" + fi + + - name: Process backports + if: steps.extract-labels.outputs.backport_branches != '' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_NUMBER="${{ steps.pr-info.outputs.pr_number }}" + PR_TITLE="${{ steps.pr-info.outputs.pr_title }}" + PR_AUTHOR="${{ 
steps.pr-info.outputs.pr_author }}" + MERGE_COMMIT_SHA="${{ steps.pr-info.outputs.merge_commit_sha }}" + OTHER_LABELS='${{ steps.extract-labels.outputs.other_labels }}' + DRY_RUN="${{ steps.pr-info.outputs.dry_run }}" + + if [ "$DRY_RUN" = "true" ]; then + echo "🔍 DRY RUN: Backport Processing" + echo "================================" + fi + + # Read backport branches + BACKPORT_BRANCHES="${{ steps.extract-labels.outputs.backport_branches }}" + + # Process each backport branch + while IFS= read -r BRANCH; do + [ -z "$BRANCH" ] && continue + + echo "Processing backport to branch: $BRANCH" + + PORT_TYPE="backport" + NEW_BRANCH="${PORT_TYPE}-${PR_NUMBER}-to-${BRANCH}" + + # Fetch the target branch + git fetch origin "$BRANCH:$BRANCH" || { + echo "Error: Failed to fetch branch $BRANCH" + continue + } + + # Create and checkout new branch from target branch + git checkout -b "$NEW_BRANCH" "$BRANCH" || { + echo "Error: Failed to create branch $NEW_BRANCH" + continue + } + + # Attempt cherry-pick + CONFLICT=false + if ! git cherry-pick -m 1 "$MERGE_COMMIT_SHA" 2>&1; then + # Check if there are conflicts + if git status | grep -q "Unmerged paths\|both modified"; then + echo "Conflicts detected during cherry-pick" + CONFLICT=true + + # Stage all changes + git add . 
+ + # Commit with conflict message + git commit --author="vitess-bot[bot] <108069721+vitess-bot[bot]@users.noreply.github.com>" \ + -m "Cherry-pick $MERGE_COMMIT_SHA with conflicts" || { + echo "Error: Failed to commit conflicts" + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + } + else + echo "Error: Cherry-pick failed with non-conflict error" + git cherry-pick --abort 2>/dev/null || true + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + fi + else + # Cherry-pick succeeded, amend to update author + git commit --amend --no-edit --author="vitess-bot[bot] <108069721+vitess-bot[bot]@users.noreply.github.com>" || { + echo "Error: Failed to amend commit" + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + } + fi + + # Push the new branch + if [ "$DRY_RUN" = "true" ]; then + echo " [DRY RUN] Would push branch: $NEW_BRANCH" + echo " Command: git push -f origin $NEW_BRANCH" + NEW_PR_NUMBER="" + else + git push -f origin "$NEW_BRANCH" || { + echo "Error: Failed to push branch $NEW_BRANCH" + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + } + + # Create PR body + PR_BODY="## Description + This is a $PORT_TYPE of #${PR_NUMBER}" + + # Determine if PR should be draft + DRAFT_FLAG="" + if [ "$CONFLICT" = true ]; then + DRAFT_FLAG="--draft" + fi + + echo " Creating pull request..." 
+ # Create the pull request + NEW_PR_NUMBER=$(gh pr create \ + --title "[$BRANCH] $PR_TITLE (#$PR_NUMBER)" \ + --body "$PR_BODY" \ + --base "$BRANCH" \ + --head "$NEW_BRANCH" \ + $DRAFT_FLAG \ + --repo "${{ github.repository }}" \ + --json number --jq '.number' 2>/dev/null || echo "") + + if [ -z "$NEW_PR_NUMBER" ]; then + echo "Error: Failed to create PR for branch $NEW_BRANCH" + git checkout main + continue + fi + + echo "Created backport PR #$NEW_PR_NUMBER" + fi + + # Display PR information (for both dry-run and real mode) + if [ "$DRY_RUN" = "true" ]; then + echo "" + echo " [DRY RUN] Would create PR with:" + echo " --------------------------------" + echo " Title: [$BRANCH] $PR_TITLE (#$PR_NUMBER)" + echo " Body: ## Description" + echo " This is a $PORT_TYPE of #${PR_NUMBER}" + echo " Base: $BRANCH" + echo " Head: $NEW_BRANCH" + if [ "$CONFLICT" = true ]; then + echo " Draft: true (due to conflicts)" + else + echo " Draft: false" + fi + echo " Repository: ${{ github.repository }}" + fi + + # Add labels - build array from OTHER_LABELS and add port type specific labels + LABELS_ARRAY=() + + # Add labels from original PR + while IFS= read -r label; do + [ -n "$label" ] && LABELS_ARRAY+=("$label") + done < <(echo "$OTHER_LABELS" | jq -r '.[]' 2>/dev/null || true) + + # Add conflict-specific labels + if [ "$CONFLICT" = true ]; then + LABELS_ARRAY+=("Merge Conflict" "Skip CI" "Backport") + else + LABELS_ARRAY+=("Backport") + fi + + # Apply labels + if [ "$DRY_RUN" = "true" ]; then + echo "" + echo " [DRY RUN] Would add labels:" + for label in "${LABELS_ARRAY[@]}"; do + echo " - $label" + done + else + # Apply labels one by one to handle potential issues + for label in "${LABELS_ARRAY[@]}"; do + gh pr edit "$NEW_PR_NUMBER" --add-label "$label" --repo "${{ github.repository }}" 2>/dev/null || echo "Warning: Could not add label '$label'" + done + fi + + # Add conflict comment if there were conflicts + if [ "$CONFLICT" = true ]; then + CONFLICT_COMMENT="Hello 
@${PR_AUTHOR}, there are conflicts in this ${PORT_TYPE}. + + Please address them in order to merge this Pull Request. You can execute the snippet below to reset your branch and resolve the conflict manually. + + Make sure you replace \`origin\` by the name of the ${{ github.repository_owner }}/${{ github.event.repository.name }} remote + \`\`\` + git fetch --all + gh pr checkout ${NEW_PR_NUMBER} -R ${{ github.repository }} + git reset --hard origin/${BRANCH} + git cherry-pick -m 1 ${MERGE_COMMIT_SHA} + \`\`\`" + + if [ "$DRY_RUN" = "true" ]; then + echo "" + echo " [DRY RUN] Would add conflict resolution comment:" + echo " ------------------------------------------------" + echo "$CONFLICT_COMMENT" | sed 's/^/ /' + else + gh pr comment "$NEW_PR_NUMBER" --body "$CONFLICT_COMMENT" --repo "${{ github.repository }}" || true + fi + fi + + # Get reviewers from original PR and add original author + if [ "$DRY_RUN" = "true" ]; then + REVIEWER_USERS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.users[].login' 2>/dev/null || true) + REVIEWER_TEAMS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.teams[].slug' 2>/dev/null || true) + + echo "" + echo " [DRY RUN] Would request reviews from:" + [ -n "$REVIEWER_USERS" ] && echo "$REVIEWER_USERS" | while read -r user; do + [ -n "$user" ] && echo " - $user (user)" + done + [ -n "$REVIEWER_TEAMS" ] && echo "$REVIEWER_TEAMS" | while read -r team; do + [ -n "$team" ] && echo " - $team (team)" + done + echo " - $PR_AUTHOR (original PR author)" + else + REVIEWER_USERS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.users[].login' 2>/dev/null || true) + REVIEWER_TEAMS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.teams[].slug' 2>/dev/null || true) + + # Build reviewers list including original author + REVIEWERS=() + while IFS= read -r reviewer; do + [ -n 
"$reviewer" ] && REVIEWERS+=("$reviewer") + done < <(echo "$REVIEWER_USERS") + while IFS= read -r team; do + [ -n "$team" ] && REVIEWERS+=("$team") + done < <(echo "$REVIEWER_TEAMS") + REVIEWERS+=("$PR_AUTHOR") + + # Request reviewers + for reviewer in "${REVIEWERS[@]}"; do + gh pr edit "$NEW_PR_NUMBER" --add-reviewer "$reviewer" --repo "${{ github.repository }}" 2>/dev/null || echo "Note: Could not add reviewer '$reviewer' (may be PR author or have insufficient permissions)" + done + fi + + # Return to main branch for next iteration + git checkout main + done <<< "$BACKPORT_BRANCHES" + + - name: Process forwardports + if: steps.extract-labels.outputs.forwardport_branches != '' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_NUMBER="${{ steps.pr-info.outputs.pr_number }}" + PR_TITLE="${{ steps.pr-info.outputs.pr_title }}" + PR_AUTHOR="${{ steps.pr-info.outputs.pr_author }}" + MERGE_COMMIT_SHA="${{ steps.pr-info.outputs.merge_commit_sha }}" + OTHER_LABELS='${{ steps.extract-labels.outputs.other_labels }}' + DRY_RUN="${{ steps.pr-info.outputs.dry_run }}" + + if [ "$DRY_RUN" = "true" ]; then + echo "🔍 DRY RUN: Forwardport Processing" + echo "===================================" + fi + + # Read forwardport branches + FORWARDPORT_BRANCHES="${{ steps.extract-labels.outputs.forwardport_branches }}" + + # Process each forwardport branch + while IFS= read -r BRANCH; do + [ -z "$BRANCH" ] && continue + + echo "Processing forwardport to branch: $BRANCH" + + PORT_TYPE="forwardport" + NEW_BRANCH="${PORT_TYPE}-${PR_NUMBER}-to-${BRANCH}" + + # Fetch the target branch + git fetch origin "$BRANCH:$BRANCH" || { + echo "Error: Failed to fetch branch $BRANCH" + continue + } + + # Create and checkout new branch from target branch + git checkout -b "$NEW_BRANCH" "$BRANCH" || { + echo "Error: Failed to create branch $NEW_BRANCH" + continue + } + + # Attempt cherry-pick + CONFLICT=false + if ! 
git cherry-pick -m 1 "$MERGE_COMMIT_SHA" 2>&1; then + # Check if there are conflicts + if git status | grep -q "Unmerged paths\|both modified"; then + echo "Conflicts detected during cherry-pick" + CONFLICT=true + + # Stage all changes + git add . + + # Commit with conflict message + git commit --author="vitess-bot[bot] <108069721+vitess-bot[bot]@users.noreply.github.com>" \ + -m "Cherry-pick $MERGE_COMMIT_SHA with conflicts" || { + echo "Error: Failed to commit conflicts" + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + } + else + echo "Error: Cherry-pick failed with non-conflict error" + git cherry-pick --abort 2>/dev/null || true + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + fi + else + # Cherry-pick succeeded, amend to update author + git commit --amend --no-edit --author="vitess-bot[bot] <108069721+vitess-bot[bot]@users.noreply.github.com>" || { + echo "Error: Failed to amend commit" + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + } + fi + + # Push the new branch + if [ "$DRY_RUN" = "true" ]; then + echo " [DRY RUN] Would push branch: $NEW_BRANCH" + echo " Command: git push -f origin $NEW_BRANCH" + NEW_PR_NUMBER="" + else + git push -f origin "$NEW_BRANCH" || { + echo "Error: Failed to push branch $NEW_BRANCH" + git checkout main + git branch -D "$NEW_BRANCH" 2>/dev/null || true + continue + } + + # Create PR body + PR_BODY="## Description + This is a $PORT_TYPE of #${PR_NUMBER}" + + # Determine if PR should be draft + DRAFT_FLAG="" + if [ "$CONFLICT" = true ]; then + DRAFT_FLAG="--draft" + fi + + echo " Creating pull request..." 
+ # Create the pull request + NEW_PR_NUMBER=$(gh pr create \ + --title "[$BRANCH] $PR_TITLE (#$PR_NUMBER)" \ + --body "$PR_BODY" \ + --base "$BRANCH" \ + --head "$NEW_BRANCH" \ + $DRAFT_FLAG \ + --repo "${{ github.repository }}" \ + --json number --jq '.number' 2>/dev/null || echo "") + + if [ -z "$NEW_PR_NUMBER" ]; then + echo "Error: Failed to create PR for branch $NEW_BRANCH" + git checkout main + continue + fi + + echo "Created forwardport PR #$NEW_PR_NUMBER" + fi + + # Display PR information (for both dry-run and real mode) + if [ "$DRY_RUN" = "true" ]; then + echo "" + echo " [DRY RUN] Would create PR with:" + echo " --------------------------------" + echo " Title: [$BRANCH] $PR_TITLE (#$PR_NUMBER)" + echo " Body: ## Description" + echo " This is a $PORT_TYPE of #${PR_NUMBER}" + echo " Base: $BRANCH" + echo " Head: $NEW_BRANCH" + if [ "$CONFLICT" = true ]; then + echo " Draft: true (due to conflicts)" + else + echo " Draft: false" + fi + echo " Repository: ${{ github.repository }}" + fi + + # Add labels - build array from OTHER_LABELS and add port type specific labels + LABELS_ARRAY=() + + # Add labels from original PR + while IFS= read -r label; do + [ -n "$label" ] && LABELS_ARRAY+=("$label") + done < <(echo "$OTHER_LABELS" | jq -r '.[]' 2>/dev/null || true) + + # Add conflict-specific labels + if [ "$CONFLICT" = true ]; then + LABELS_ARRAY+=("Merge Conflict" "Skip CI" "Forwardport") + else + LABELS_ARRAY+=("Forwardport") + fi + + # Apply labels + if [ "$DRY_RUN" = "true" ]; then + echo "" + echo " [DRY RUN] Would add labels:" + for label in "${LABELS_ARRAY[@]}"; do + echo " - $label" + done + else + # Apply labels one by one to handle potential issues + for label in "${LABELS_ARRAY[@]}"; do + gh pr edit "$NEW_PR_NUMBER" --add-label "$label" --repo "${{ github.repository }}" 2>/dev/null || echo "Warning: Could not add label '$label'" + done + fi + + # Add conflict comment if there were conflicts + if [ "$CONFLICT" = true ]; then + CONFLICT_COMMENT="Hello 
@${PR_AUTHOR}, there are conflicts in this ${PORT_TYPE}. + + Please address them in order to merge this Pull Request. You can execute the snippet below to reset your branch and resolve the conflict manually. + + Make sure you replace \`origin\` by the name of the ${{ github.repository_owner }}/${{ github.event.repository.name }} remote + \`\`\` + git fetch --all + gh pr checkout ${NEW_PR_NUMBER} -R ${{ github.repository }} + git reset --hard origin/${BRANCH} + git cherry-pick -m 1 ${MERGE_COMMIT_SHA} + \`\`\`" + + if [ "$DRY_RUN" = "true" ]; then + echo "" + echo " [DRY RUN] Would add conflict resolution comment:" + echo " ------------------------------------------------" + echo "$CONFLICT_COMMENT" | sed 's/^/ /' + else + gh pr comment "$NEW_PR_NUMBER" --body "$CONFLICT_COMMENT" --repo "${{ github.repository }}" || true + fi + fi + + # Get reviewers from original PR and add original author + if [ "$DRY_RUN" = "true" ]; then + REVIEWER_USERS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.users[].login' 2>/dev/null || true) + REVIEWER_TEAMS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.teams[].slug' 2>/dev/null || true) + + echo "" + echo " [DRY RUN] Would request reviews from:" + [ -n "$REVIEWER_USERS" ] && echo "$REVIEWER_USERS" | while read -r user; do + [ -n "$user" ] && echo " - $user (user)" + done + [ -n "$REVIEWER_TEAMS" ] && echo "$REVIEWER_TEAMS" | while read -r team; do + [ -n "$team" ] && echo " - $team (team)" + done + echo " - $PR_AUTHOR (original PR author)" + else + REVIEWER_USERS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.users[].login' 2>/dev/null || true) + REVIEWER_TEAMS=$(gh api "repos/${{ github.repository }}/pulls/${PR_NUMBER}/requested_reviewers" \ + --jq '.teams[].slug' 2>/dev/null || true) + + # Build reviewers list including original author + REVIEWERS=() + while IFS= read -r reviewer; do + [ -n 
"$reviewer" ] && REVIEWERS+=("$reviewer") + done < <(echo "$REVIEWER_USERS") + while IFS= read -r team; do + [ -n "$team" ] && REVIEWERS+=("$team") + done < <(echo "$REVIEWER_TEAMS") + REVIEWERS+=("$PR_AUTHOR") + + # Request reviewers + for reviewer in "${REVIEWERS[@]}"; do + gh pr edit "$NEW_PR_NUMBER" --add-reviewer "$reviewer" --repo "${{ github.repository }}" 2>/dev/null || echo "Note: Could not add reviewer '$reviewer' (may be PR author or have insufficient permissions)" + done + fi + + # Return to main branch for next iteration + git checkout main + done <<< "$FORWARDPORT_BRANCHES" diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml index 21e5f3daf3f..3774ea0aeb5 100644 --- a/.github/workflows/cluster_endtoend_12.yml +++ b/.github/workflows/cluster_endtoend_12.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml index 92d4d39e384..3cd20134dae 100644 --- a/.github/workflows/cluster_endtoend_13.yml +++ b/.github/workflows/cluster_endtoend_13.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml index 
54ce066fd0f..e47bf5e1886 100644 --- a/.github/workflows/cluster_endtoend_15.yml +++ b/.github/workflows/cluster_endtoend_15.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml index 126b65346d8..49a1da1a64e 100644 --- a/.github/workflows/cluster_endtoend_18.yml +++ b/.github/workflows/cluster_endtoend_18.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Installing zookeeper and consul if: steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml index fb2dcedf338..46910787ee9 100644 --- a/.github/workflows/cluster_endtoend_21.yml +++ b/.github/workflows/cluster_endtoend_21.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Install Minio run: | diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml index a12fbff48b6..39ddda77a24 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install 
github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_backup_pitr_mysqlshell.yml b/.github/workflows/cluster_endtoend_backup_pitr_mysqlshell.yml index ad7233c6b5d..929c64d2397 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr_mysqlshell.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr_mysqlshell.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml index bea73382a26..73dfb3f50a8 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml @@ -105,7 +105,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml index 5b92d325424..32dac2895fd 100644 --- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml +++ 
b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_mysql84.yml b/.github/workflows/cluster_endtoend_mysql84.yml index e6efcf739f7..d30b4b6d1e2 100644 --- a/.github/workflows/cluster_endtoend_mysql84.yml +++ b/.github/workflows/cluster_endtoend_mysql84.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml index 140c7b0e506..36083275f76 100644 --- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml +++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Installing zookeeper and consul if: steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml index 4405f989640..85e5e4e19a9 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml +++ 
b/.github/workflows/cluster_endtoend_onlineddl_revert.yml @@ -97,7 +97,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml index 11f7e31be57..a91ef221916 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml @@ -97,7 +97,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml index 3b7daba2db7..c942b938847 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -97,7 +97,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml 
b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml index 0ad0f5c35c9..94f3b89fa94 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml @@ -97,7 +97,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml index 9a266e1ee31..6222c257a07 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml @@ -97,7 +97,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml index 35e879254a2..1729148420d 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml @@ -97,7 +97,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 
'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml index 54aa074b31f..cd210edc7a5 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml @@ -97,7 +97,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml index 985b4b0426b..c07c74493ee 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Installing zookeeper and consul if: steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml index 0a98589fddf..9d1ba489411 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup 
launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml index c83463219d2..9d4dd0ce240 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml index 972b1656ffa..96ee6a40d2e 100644 --- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml +++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml index 2cf964b8b55..ad040bf4768 100644 --- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml +++ 
b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml index c7b9a534dd2..7cbc602016d 100644 --- a/.github/workflows/cluster_endtoend_vreplication_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml index 8eba516bcd1..ef846d9e007 100644 --- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml +++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git 
a/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml b/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml index f140d2ec70a..42c4f00e8c2 100644 --- a/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml +++ b/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml index 3f78447e4ec..8a187105297 100644 --- a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml +++ b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml b/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml index eef2e2688f1..1475234342f 100644 --- a/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml +++ b/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install 
github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate.yml b/.github/workflows/cluster_endtoend_vreplication_migrate.yml index bbf0c99a7fd..316aacda3e6 100644 --- a/.github/workflows/cluster_endtoend_vreplication_migrate.yml +++ b/.github/workflows/cluster_endtoend_vreplication_migrate.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml b/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml index dea317e3e6e..76d57d15909 100644 --- a/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml +++ b/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml index 5f9fecc4e64..43e49ecb1b3 100644 --- 
a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml index 98e2a51b4bd..ebaf38ed092 100644 --- a/.github/workflows/cluster_endtoend_vreplication_v2.yml +++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_vdiff2.yml b/.github/workflows/cluster_endtoend_vreplication_vdiff2.yml index f79584ca1cd..a8a46bd2567 100644 --- a/.github/workflows/cluster_endtoend_vreplication_vdiff2.yml +++ b/.github/workflows/cluster_endtoend_vreplication_vdiff2.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && 
github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vreplication_vtctldclient_movetables_tz.yml b/.github/workflows/cluster_endtoend_vreplication_vtctldclient_movetables_tz.yml index bc549979f4c..0bd00879b00 100644 --- a/.github/workflows/cluster_endtoend_vreplication_vtctldclient_movetables_tz.yml +++ b/.github/workflows/cluster_endtoend_vreplication_vtctldclient_movetables_tz.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vstream.yml b/.github/workflows/cluster_endtoend_vstream.yml index b2fe9b30515..6e2188b653b 100644 --- a/.github/workflows/cluster_endtoend_vstream.yml +++ b/.github/workflows/cluster_endtoend_vstream.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml index f9ae73f0845..0f0d1bdf2c7 100644 --- a/.github/workflows/cluster_endtoend_vtbackup.yml +++ b/.github/workflows/cluster_endtoend_vtbackup.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable 
dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml index 9e2f36d3a24..0c35805d26f 100644 --- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml index 305744a02ec..7968f9502b9 100644 --- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml +++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml index 3d57a1f08fd..5d791913e13 100644 --- a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml +++ 
b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml index 3329dde0eda..d8c93ad46c2 100644 --- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml +++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml index a65f484527d..28880373594 100644 --- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml 
b/.github/workflows/cluster_endtoend_vtgate_godriver.yml index 30f96b98abf..d029349d4b9 100644 --- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml +++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml index bce3096d51b..11282e7b24f 100644 --- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml +++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_plantests.yml b/.github/workflows/cluster_endtoend_vtgate_plantests.yml index 40f3b4d3297..5481e36d625 100644 --- a/.github/workflows/cluster_endtoend_vtgate_plantests.yml +++ b/.github/workflows/cluster_endtoend_vtgate_plantests.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && 
steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml index b9212aed370..6e1c6c0512f 100644 --- a/.github/workflows/cluster_endtoend_vtgate_queries.yml +++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml index 93f2b9f82c5..758600d017a 100644 --- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml +++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml index 307533195dd..60757ac9624 100644 --- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml +++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install 
github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml index 1f1121951cf..41e089e0aaa 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml index 083e555159e..28adf2d5b2d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml index 01a40941cba..c6b42441519 100644 --- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml +++ 
b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml index 8ed6221ab2e..c2b6904557d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml index 09928da3ae0..33e3155e091 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Installing zookeeper and consul if: steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml index 6de86d2f30c..abb86ec1a29 100644 --- 
a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml index cfabc51b7f7..1c501b6ee22 100644 --- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml +++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml index b4185cf74cd..57ade362b6d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml +++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git 
a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml index 123dccbc137..2ddcad87416 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml index a3b3a66ca46..907c54e906b 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml index 8975c01b45f..05a8dbc3f2f 100644 --- a/.github/workflows/cluster_endtoend_vtorc.yml +++ b/.github/workflows/cluster_endtoend_vtorc.yml @@ -105,7 +105,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && 
steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml index 784d90f5150..a76c91c9cb9 100644 --- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml +++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml @@ -96,7 +96,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml index 22de31fcad1..2ca8c183ab1 100644 --- a/.github/workflows/cluster_endtoend_xb_backup.yml +++ b/.github/workflows/cluster_endtoend_xb_backup.yml @@ -105,7 +105,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml index ee2627f0b09..2239172b633 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery.yml @@ -105,7 +105,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Setup launchable dependencies if: 
github.event_name == 'pull_request' && github.event.pull_request.draft == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml index 9ba4a7d4eee..4efaa60b8a7 100644 --- a/.github/workflows/static_checks_etc.yml +++ b/.github/workflows/static_checks_etc.yml @@ -186,21 +186,15 @@ jobs: git status test -z "$(git diff-index --name-only HEAD --)" || exit 1 - - name: Install golangci-lint - if: steps.changes.outputs.go_files == 'true' - run: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.6 - - - name: Clean Env - if: steps.changes.outputs.go_files == 'true' - run: $(go env GOPATH)/bin/golangci-lint cache clean - - - name: Print linter version - if: steps.changes.outputs.go_files == 'true' - run: $(go env GOPATH)/bin/golangci-lint --version - - name: Run golangci-lint if: steps.changes.outputs.go_files == 'true' - run: $(go env GOPATH)/bin/golangci-lint run go/... 
--timeout 10m || exit 1 + uses: golangci/golangci-lint-action@v9 + with: + args: --timeout 10m + install-mode: "goinstall" + skip-cache: true + working-directory: go + version: v2.1.6 - name: Run go mod tidy if: steps.changes.outputs.go_files == 'true' diff --git a/.github/workflows/unit_test_evalengine_mysql57.yml b/.github/workflows/unit_test_evalengine_mysql57.yml index 2badca8f41f..4498049659e 100644 --- a/.github/workflows/unit_test_evalengine_mysql57.yml +++ b/.github/workflows/unit_test_evalengine_mysql57.yml @@ -92,7 +92,7 @@ jobs: go install golang.org/x/tools/cmd/goimports@latest # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Run make tools if: steps.changes.outputs.unit_tests == 'true' diff --git a/.github/workflows/unit_test_evalengine_mysql80.yml b/.github/workflows/unit_test_evalengine_mysql80.yml index 6687ed89398..27089276b7b 100644 --- a/.github/workflows/unit_test_evalengine_mysql80.yml +++ b/.github/workflows/unit_test_evalengine_mysql80.yml @@ -92,7 +92,7 @@ jobs: go install golang.org/x/tools/cmd/goimports@latest # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Run make tools if: steps.changes.outputs.unit_tests == 'true' diff --git a/.github/workflows/unit_test_evalengine_mysql84.yml b/.github/workflows/unit_test_evalengine_mysql84.yml index f9b044417c3..53a05b089fb 100644 --- a/.github/workflows/unit_test_evalengine_mysql84.yml +++ b/.github/workflows/unit_test_evalengine_mysql84.yml @@ -92,7 +92,7 @@ jobs: go install golang.org/x/tools/cmd/goimports@latest # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Run make tools if: 
steps.changes.outputs.unit_tests == 'true' diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml index 0fe3f2ed1e7..2be9fd49b53 100644 --- a/.github/workflows/unit_test_mysql57.yml +++ b/.github/workflows/unit_test_mysql57.yml @@ -92,7 +92,7 @@ jobs: go install golang.org/x/tools/cmd/goimports@latest # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Run make tools if: steps.changes.outputs.unit_tests == 'true' diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml index 00eadf05ce8..6f19f0671a3 100644 --- a/.github/workflows/unit_test_mysql80.yml +++ b/.github/workflows/unit_test_mysql80.yml @@ -92,7 +92,7 @@ jobs: go install golang.org/x/tools/cmd/goimports@latest # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Run make tools if: steps.changes.outputs.unit_tests == 'true' diff --git a/.github/workflows/unit_test_mysql84.yml b/.github/workflows/unit_test_mysql84.yml index f8407039c46..6cc501a831b 100644 --- a/.github/workflows/unit_test_mysql84.yml +++ b/.github/workflows/unit_test_mysql84.yml @@ -92,7 +92,7 @@ jobs: go install golang.org/x/tools/cmd/goimports@latest # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 - name: Run make tools if: steps.changes.outputs.unit_tests == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml index deaf1d56de9..7dcfdb967c7 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml @@ -62,20 +62,14 @@ 
jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_backups_e2e.yml' - - name: Set up Go + - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 + uses: ./.github/actions/tune-os - name: Set up python if: steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -93,11 +87,6 @@ jobs: sudo service etcd stop - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb @@ -113,6 +102,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -135,6 +131,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml index 8c3d32c7b68..e90607d7dfd 100644 --- 
a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml @@ -64,20 +64,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml' - - name: Set up Go + - name: Tune the OS if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version-file: go.mod + uses: ./.github/actions/tune-os - name: Set up python if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -95,11 +89,6 @@ jobs: sudo service etcd stop - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb @@ -115,6 +104,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | @@ 
-137,6 +133,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml index 3482ed899f9..decdced85dd 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml @@ -37,13 +37,6 @@ jobs: fetch-depth: 0 persist-credentials: 'false' - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - - name: Check for changes in relevant files uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 id: changes @@ -66,20 +59,23 @@ jobs: - '.github/workflows/upgrade_downgrade_test_backups_manual.yml' - 'examples/**' - - name: Set up Go - if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 - - name: Set up python + - name: Set output with latest release branch if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + id: output-previous-release-ref + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Tune 
the OS if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/tune-os + - name: Set up python + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -97,9 +93,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb @@ -115,6 +108,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -137,6 +137,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' run: | @@ -278,7 +285,7 @@ jobs: echo "select count(sku) from corder;" | mysql 2>&1| grep 6 - name: Stop the Vitess cluster - if: always() && steps.changes.outputs.end_to_end == 'true' + if: steps.changes.outputs.end_to_end == 'true' && !cancelled() run: | source build.env ; cd examples/local ./401_teardown.sh || true diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml index 54340adb23b..f91a1b85287 100644 --- 
a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml @@ -37,13 +37,6 @@ jobs: fetch-depth: 0 persist-credentials: 'false' - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT - - name: Check for changes in relevant files if: steps.output-next-release-ref.outputs.next_release_ref != '' uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 @@ -67,20 +60,22 @@ jobs: - '.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml' - 'examples/**' - - name: Set up Go + - name: Set output with latest release branch + id: output-next-release-ref + if: steps.changes.outputs.end_to_end == 'true' + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + + - name: Tune the OS if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version-file: go.mod + uses: ./.github/actions/tune-os - name: Set up python if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -98,9 +93,6 @@ jobs: sudo service etcd stop - # 
install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb @@ -116,6 +108,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | @@ -138,6 +137,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | @@ -279,7 +285,7 @@ jobs: echo "select count(sku) from corder;" | mysql 2>&1| grep 6 - name: Stop the Vitess cluster - if: always() && steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' && !cancelled() run: | source build.env ; cd examples/local ./401_teardown.sh || true diff --git a/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml b/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml index 7986628bdb6..881d21379a4 100644 --- a/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml +++ 
b/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml @@ -60,6 +60,7 @@ jobs: - '.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml' - name: Set output with latest release branch + if: steps.changes.outputs.end_to_end == 'true' id: output-previous-release-ref run: | previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) @@ -67,26 +68,21 @@ jobs: echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Set output with next release branch + if: steps.changes.outputs.end_to_end == 'true' id: output-next-release-ref run: | next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) echo $next_release_ref echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT - - name: Set up Go + - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 + uses: ./.github/actions/tune-os - name: Set up python if: steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -104,9 +100,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the last release of Vitess - name: Check out last version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.changes.outputs.end_to_end == 'true' @@ -115,6 +108,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: 
go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -139,6 +139,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -162,6 +169,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml index 03914d909dc..6ec87d3b7e1 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml @@ -66,20 +66,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_query_serving_queries.yml' - - name: Set up Go + - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 + uses: ./.github/actions/tune-os - name: Set up python if: steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -97,9 +91,6 @@ jobs: sudo service etcd stop - # install JUnit report 
formatter - go install github.com/vitessio/go-junit-report@HEAD - # Build current commit's binaries - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' @@ -124,6 +115,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml index dba898ca5ad..584c2fd077d 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml @@ -18,7 +18,6 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. 
jobs: - upgrade_downgrade_test: timeout-minutes: 60 name: Run Upgrade Downgrade Test - Query Serving (Queries - 2) @@ -38,13 +37,6 @@ jobs: fetch-depth: 0 persist-credentials: 'false' - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - - name: Check for changes in relevant files uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 id: changes @@ -66,20 +58,22 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_query_serving_queries.yml' - - name: Set up Go - if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 - - - name: Set up python + - name: Set output with latest release branch if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + id: output-previous-release-ref + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/tune-os + - name: Set up python + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -97,9 +91,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Build current commit's binaries - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' @@ -124,6 +115,13 @@ jobs: ref: ${{ 
steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_2_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2_next_release.yml index fb4ce357ba1..5ee0dd15fd8 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_2_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2_next_release.yml @@ -67,20 +67,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml' - - name: Set up Go + - name: Tune the OS if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version-file: go.mod + uses: ./.github/actions/tune-os - name: Set up python if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -98,9 +92,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the next release of Vitess - name: Check out other version's code (${{ 
steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' @@ -109,6 +100,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | @@ -131,6 +129,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml index dc7c64cb3ff..b85a225a6a6 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml @@ -67,20 +67,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml' - - name: Set up Go + - name: Tune the OS if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version-file: go.mod + uses: 
./.github/actions/tune-os - name: Set up python if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -98,9 +92,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the next release of Vitess - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' @@ -109,6 +100,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | @@ -131,6 +129,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | diff 
--git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml index 9b09fb257d7..86a0de410a2 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml @@ -66,20 +66,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_query_serving_schema.yml' - - name: Set up Go + - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 + uses: ./.github/actions/tune-os - name: Set up python if: steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -97,9 +91,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the last release of Vitess - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.changes.outputs.end_to_end == 'true' @@ -108,6 +99,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -130,6 +128,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + 
go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml index e578430f55a..e4a1e96744b 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml @@ -67,26 +67,20 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml' - - name: Set up Go + - name: Tune the OS if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version-file: go.mod + uses: ./.github/actions/tune-os - name: Set up python if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql with: flavor: mysql-8.4 - + - name: Get base dependencies timeout-minutes: 10 if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' @@ -98,9 +92,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the next release of Vitess - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: 
steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' @@ -109,6 +100,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | @@ -131,6 +129,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml index 1528843c9a5..892db9ce892 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml @@ -67,20 +67,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml' - - name: Set up Go + - name: Tune the OS if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version-file: go.mod + uses: ./.github/actions/tune-os - name: Set up python if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: 
actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -98,9 +92,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the next release of Vitess - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' @@ -109,6 +100,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | @@ -131,6 +129,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml index 
e3549385f00..a92ff81c84a 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml @@ -67,20 +67,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml' - - name: Set up Go + - name: Tune the OS if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version-file: go.mod + uses: ./.github/actions/tune-os - name: Set up python if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -98,9 +92,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb @@ -116,6 +107,13 @@ jobs: ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the next release if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | 
@@ -138,6 +136,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.output-next-release-ref.outputs.next_release_ref != '' && steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml index b9c9e9690e9..708d7c9a4b1 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml @@ -66,20 +66,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml' - - name: Set up Go + - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 + uses: ./.github/actions/tune-os - name: Set up python if: steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -97,9 +91,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the last release of Vitess - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.changes.outputs.end_to_end == 'true' @@ -108,6 +99,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: 
steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -130,6 +128,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml index 229f377b6ee..891b5d11c9e 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml @@ -66,20 +66,14 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml' - - name: Set up Go + - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 + uses: ./.github/actions/tune-os - name: Set up python if: steps.changes.outputs.end_to_end == 'true' uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - - name: Tune the OS - if: steps.changes.outputs.end_to_end == 'true' - uses: ./.github/actions/tune-os - - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -97,9 +91,6 @@ jobs: sudo service etcd stop - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - # Checkout to the last release of Vitess - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.changes.outputs.end_to_end == 'true' @@ -108,6 
+99,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -130,6 +128,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/upgrade_downgrade_test_semi_sync.yml b/.github/workflows/upgrade_downgrade_test_semi_sync.yml index a17493636a2..0c339c03df8 100644 --- a/.github/workflows/upgrade_downgrade_test_semi_sync.yml +++ b/.github/workflows/upgrade_downgrade_test_semi_sync.yml @@ -34,13 +34,6 @@ jobs: fetch-depth: 0 persist-credentials: 'false' - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - - name: Check for changes in relevant files uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 id: changes @@ -62,20 +55,22 @@ jobs: - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_semi_sync.yml' - - name: Set up Go - if: steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: 1.24.7 - - - name: Set up python + - name: Set output with latest release branch + id: output-previous-release-ref if: steps.changes.outputs.end_to_end == 'true' - uses: 
actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Tune the OS if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/tune-os + - name: Set up python + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - name: Setup MySQL if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/setup-mysql @@ -92,11 +87,6 @@ jobs: sudo apt-get install -y make unzip g++ etcd-client etcd-server curl git wget grep sudo service etcd stop - - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get install -y gnupg2 @@ -113,6 +103,13 @@ jobs: ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for the last release if: steps.changes.outputs.end_to_end == 'true' run: | @@ -135,6 +132,13 @@ jobs: with: persist-credentials: 'false' + - name: Set up Go + if: steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: go.mod + cache: false + - name: Get dependencies for this commit if: steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/vitess_tester_vtgate.yml b/.github/workflows/vitess_tester_vtgate.yml index 075908f42f0..0bae2a76a7a 100644 --- a/.github/workflows/vitess_tester_vtgate.yml +++ 
b/.github/workflows/vitess_tester_vtgate.yml @@ -93,7 +93,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@99fa7f0daf16db969f54a49139a14471e633e6e8 # install vitess tester go install github.com/vitessio/vt/go/vt@e43009309f599378504905d4b804460f47822ac5 diff --git a/.github/workflows/vtop_example.yml b/.github/workflows/vtop_example.yml index 49eba9909bd..feb347a886f 100644 --- a/.github/workflows/vtop_example.yml +++ b/.github/workflows/vtop_example.yml @@ -14,7 +14,7 @@ concurrency: jobs: build: name: VTop Example - runs-on: self-hosted + runs-on: oracle-vm-8cpu-32gb-x86-64 steps: - name: Skip CI @@ -60,6 +60,12 @@ jobs: if: steps.changes.outputs.end_to_end == 'true' uses: ./.github/actions/tune-os + - name: Setup MySQL + if: steps.changes.outputs.end_to_end == 'true' + uses: ./.github/actions/setup-mysql + with: + flavor: mysql-8.4 + - name: Get dependencies if: steps.changes.outputs.end_to_end == 'true' run: | @@ -83,4 +89,4 @@ jobs: timeout-minutes: 60 run: | source build.env - go run test.go -docker=false -skip-build -print-log -follow -retry=1 -timeout=60m vtop_example \ No newline at end of file + go run test.go -docker=false -skip-build -print-log -follow -retry=1 -timeout=60m vtop_example diff --git a/.golangci.yml b/.golangci.yml index 435bdc8b2b3..249240a0132 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,6 +12,7 @@ linters: - ineffassign - mirror - nolintlint + - nosprintfhostport - perfsprint - sqlclosecheck - staticcheck @@ -155,6 +156,7 @@ linters: path: ^go/stats/statsd/ - linters: - errcheck + - nosprintfhostport - sqlclosecheck path: ^go/test/ - linters: diff --git a/MAINTAINERS.md b/MAINTAINERS.md index c8a3c4d3976..614dc88a2a9 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -41,7 +41,7 @@ systay, harshit-gangal, frouioui, dbussink shlomi-noach, dbussink ### Cluster Management -dbussink +dbussink, timvaillancourt ### Java 
harshit-gangal @@ -55,6 +55,9 @@ beingnoble03, rohit-nayak-ps ### Messaging derekperkins, mattlord +### High Availability +mattlord, timvaillancourt + ## Past Maintainers We thank the following past maintainers for their contributions. diff --git a/Makefile b/Makefile index 79f72c8cbb7..9d33f618d96 100644 --- a/Makefile +++ b/Makefile @@ -284,7 +284,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto # Please read docker/README.md to understand the different available images. # This rule builds the bootstrap images for all flavors. -DOCKER_IMAGES_FOR_TEST = mysql80 mysql84 percona80 +DOCKER_IMAGES_FOR_TEST = mysql80 mysql84 percona80 percona84 DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST) BOOTSTRAP_VERSION=49 ensure_bootstrap_version: @@ -339,7 +339,7 @@ docker_lite: docker_mini: ${call build_docker_image,docker/mini/Dockerfile,vitess/mini} -DOCKER_VTTESTSERVER_SUFFIX = mysql84 +DOCKER_VTTESTSERVER_SUFFIX = mysql84 percona84 DOCKER_VTTESTSERVER_TARGETS = $(addprefix docker_vttestserver_,$(DOCKER_VTTESTSERVER_SUFFIX)) $(DOCKER_VTTESTSERVER_TARGETS): docker_vttestserver_%: ${call build_docker_image,docker/vttestserver/Dockerfile.$*,vitess/vttestserver:$*} diff --git a/changelog/18.0/18.0.0/release_notes.md b/changelog/18.0/18.0.0/release_notes.md index 9851245a648..26d5c5da79f 100644 --- a/changelog/18.0/18.0.0/release_notes.md +++ b/changelog/18.0/18.0.0/release_notes.md @@ -1,4 +1,5 @@ # Release of Vitess v18.0.0 + ## Summary ### Table of Contents diff --git a/changelog/24.0/24.0.0/summary.md b/changelog/24.0/24.0.0/summary.md new file mode 100644 index 00000000000..5b840e64209 --- /dev/null +++ b/changelog/24.0/24.0.0/summary.md @@ -0,0 +1,32 @@ +# Release of Vitess v24.0.0 +## Summary + +### Table of Contents + +- **[Minor Changes](#minor-changes)** + - **[VTGate](#minor-changes-vtgate)** + - [New default for `--legacy-replication-lag-algorithm` flag](#vtgate-new-default-legacy-replication-lag-algorithm) + - 
**[VTTablet](#minor-changes-vttablet)** + - [New Experimental flag `--init-tablet-type-lookup`](#vttablet-init-tablet-type-lookup) + +## Minor Changes + +### VTGate + +#### New default for `--legacy-replication-lag-algorithm` flag + +The VTGate flag `--legacy-replication-lag-algorithm` now defaults to `false`, disabling the legacy approach to handling replication lag by default. + +Instead, a simpler algorithm purely based on low lag, high lag and minimum number of tablets is used, which has proven to be more stable in many production environments. A detailed explanation of the two approaches [is explained in this code comment](https://github.com/vitessio/vitess/blob/main/go/vt/discovery/replicationlag.go#L125-L149). + +In v25 this flag will become deprecated and in the following release it will be removed. In the meantime, the legacy behaviour can be used by setting `--legacy-replication-lag-algorithm=true`. This deprecation is tracked in https://github.com/vitessio/vitess/issues/18914. + +### VTTablet + +#### New Experimental flag `--init-tablet-type-lookup` + +The new experimental flag `--init-tablet-type-lookup` for VTTablet allows tablets to automatically restore their previous tablet type on restart by looking up the existing topology record, rather than always using the static `--init-tablet-type` value. + +When enabled, the tablet uses its alias to look up the tablet type from the existing topology record on restart. This allows tablets to maintain their changed roles (e.g., RDONLY/DRAINED) across restarts without manual reconfiguration. If disabled or if no topology record exists, the standard `--init-tablet-type` value will be used instead. + +**Note**: Vitess Operator–managed deployments generally do not keep tablet records in the topo between restarts, so this feature will not take effect in those environments. 
diff --git a/changelog/24.0/README.md b/changelog/24.0/README.md new file mode 100644 index 00000000000..7540680b861 --- /dev/null +++ b/changelog/24.0/README.md @@ -0,0 +1,2 @@ +## v24.0 +* **[24.0.0](24.0.0)** diff --git a/changelog/README.md b/changelog/README.md index b5cfdedcc8c..2d0ff5ca479 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -1,4 +1,5 @@ ## Releases +* [24.0](24.0) * [23.0](23.0) * [22.0](22.0) * [21.0](21.0) diff --git a/docker/lite/Dockerfile.percona84 b/docker/lite/Dockerfile.percona84 new file mode 100644 index 00000000000..05783a12a49 --- /dev/null +++ b/docker/lite/Dockerfile.percona84 @@ -0,0 +1,64 @@ +# Copyright 2025 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM --platform=linux/amd64 golang:1.25.3-bookworm AS builder + +# Allows docker builds to set the BUILD_NUMBER +ARG BUILD_NUMBER + +# Allows docker builds to set the BUILD_GIT_BRANCH +ARG BUILD_GIT_BRANCH + +# Allows docker builds to set the BUILD_GIT_REV +ARG BUILD_GIT_REV + +# Allows docker builds to set the BUILD_TIME +ARG BUILD_TIME + +WORKDIR /vt/src/vitess.io/vitess + +# Create vitess user +RUN groupadd -r vitess && useradd -r -g vitess vitess +RUN mkdir -p /vt/vtdataroot /home/vitess +RUN chown -R vitess:vitess /vt /home/vitess +USER vitess + +# Re-copy sources from working tree. +COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess + +RUN make install PREFIX=/vt/install + +# Start over and build the final image. 
+FROM --platform=linux/amd64 debian:bookworm-slim + +# Install dependencies +COPY docker/utils/install_dependencies.sh /vt/dist/install_dependencies.sh +RUN /vt/dist/install_dependencies.sh percona84 + +# Set up Vitess user and directory tree. +RUN groupadd -r vitess && useradd -r -g vitess vitess +RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt + +# Set up Vitess environment (just enough to run pre-built Go binaries) +ENV VTROOT /vt +ENV VTDATAROOT /vt/vtdataroot +ENV PATH $VTROOT/bin:$PATH + +# Copy artifacts from builder layer. +COPY --from=builder --chown=vitess:vitess /vt/install /vt +COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin + +# Create mount point for actual data (e.g. MySQL data dir) +VOLUME /vt/vtdataroot +USER vitess diff --git a/docker/utils/install_dependencies.sh b/docker/utils/install_dependencies.sh index ebb5f399a34..275d9fd2bd7 100755 --- a/docker/utils/install_dependencies.sh +++ b/docker/utils/install_dependencies.sh @@ -133,6 +133,15 @@ percona80) percona-xtrabackup-80 ) ;; +percona84) + PACKAGES=( + libperconaserverclient22 + percona-telemetry-agent + percona-server-rocksdb + percona-server-server + percona-xtrabackup-84 + ) + ;; *) echo "Unknown flavor ${FLAVOR}" exit 1 @@ -169,6 +178,12 @@ percona80) echo 'deb http://repo.percona.com/apt bookworm main' > /etc/apt/sources.list.d/percona.list echo 'deb http://repo.percona.com/ps-80/apt bookworm main' > /etc/apt/sources.list.d/percona80.list ;; +percona84) + echo 'deb http://repo.percona.com/apt bookworm main' > /etc/apt/sources.list.d/percona.list + echo 'deb http://repo.percona.com/pxb-84-lts/apt bookworm main' >> /etc/apt/sources.list.d/percona.list + echo 'deb http://repo.percona.com/telemetry/apt bookworm main' > /etc/apt/sources.list.d/percona-telemetry.list + echo 'deb http://repo.percona.com/ps-84-lts/apt bookworm main' > /etc/apt/sources.list.d/percona84.list + ;; esac # Pre-fill values for installation prompts that are 
normally interactive. @@ -178,6 +193,13 @@ percona80) debconf debconf/frontend select Noninteractive percona-server-server-8.0 percona-server-server/root_password password 'unused' percona-server-server-8.0 percona-server-server/root_password_again password 'unused' +EOF + ;; +percona84) + debconf-set-selections < --init-keyspace string (init parameter) keyspace to use for this tablet --init-shard string (init parameter) shard to use for this tablet - --init-tablet-type string (init parameter) tablet type to use for this tablet. Valid values are: PRIMARY, REPLICA, SPARE, and RDONLY. The default is REPLICA. + --init-tablet-type string (init parameter) tablet type to use for this tablet. Valid values are: REPLICA, RDONLY, and SPARE. The default is REPLICA. + --init-tablet-type-lookup (Experimental, init parameter) if enabled, uses tablet alias to look up the tablet type from the existing topology record on restart and use that instead of init-tablet-type. This allows tablets to maintain their changed roles (e.g., RDONLY/DRAINED) across restarts. If disabled or if no topology record exists, init-tablet-type will be used. --init-tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet --init-timeout duration (init parameter) timeout to use for the init phase. (default 1m0s) --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt index 6535fe13bac..b1f4ef4b0b0 100644 --- a/go/flags/endtoend/vtgate.txt +++ b/go/flags/endtoend/vtgate.txt @@ -109,7 +109,7 @@ Flags: --keep-logs-by-mtime duration keep logs for this long (using mtime) (zero to keep forever) --keyspaces-to-watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema. 
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) - --legacy-replication-lag-algorithm Use the legacy algorithm when selecting vttablets for serving. (default true) + --legacy-replication-lag-algorithm (DEPRECATED) Use the legacy algorithm when selecting vttablets for serving. --lock-heartbeat-time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s) --lock-timeout duration Maximum time to wait when attempting to acquire a lock from the topo server (default 45s) --log-err-stacks log stack traces for errors diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index 0b2e911768c..2a0cd50e65a 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -201,7 +201,8 @@ Flags: --init-db-name-override string (init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_ --init-keyspace string (init parameter) keyspace to use for this tablet --init-shard string (init parameter) shard to use for this tablet - --init-tablet-type string (init parameter) tablet type to use for this tablet. Valid values are: PRIMARY, REPLICA, SPARE, and RDONLY. The default is REPLICA. + --init-tablet-type string (init parameter) tablet type to use for this tablet. Valid values are: REPLICA, RDONLY, and SPARE. The default is REPLICA. + --init-tablet-type-lookup (Experimental, init parameter) if enabled, uses tablet alias to look up the tablet type from the existing topology record on restart and use that instead of init-tablet-type. This allows tablets to maintain their changed roles (e.g., RDONLY/DRAINED) across restarts. If disabled or if no topology record exists, init-tablet-type will be used. --init-tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet --init-timeout duration (init parameter) timeout to use for the init phase. 
(default 1m0s) --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt index deff89cde89..ad05efc4b19 100644 --- a/go/flags/endtoend/vttestserver.txt +++ b/go/flags/endtoend/vttestserver.txt @@ -43,6 +43,7 @@ Flags: --external-topo-implementation string the topology implementation to use for vtcombo process --extra-my-cnf string extra files to add to the config, separated by ':' --foreign-key-mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow") + --gateway-initial-tablet-timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s) --grpc-auth-mode string Which auth plugin implementation to use (eg: static) --grpc-auth-mtls-allowed-substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc-auth-static-client-creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. diff --git a/go/pools/smartconnpool/pool.go b/go/pools/smartconnpool/pool.go index 2beb0b8ac46..b6b7850226b 100644 --- a/go/pools/smartconnpool/pool.go +++ b/go/pools/smartconnpool/pool.go @@ -460,9 +460,13 @@ func (pool *ConnPool[C]) pop(stack *connStack[C]) *Pooled[C] { // to expire this connection (even if it's still visible to them), so it's // safe to return it for conn, ok := stack.Pop(); ok; conn, ok = stack.Pop() { - if conn.timeUsed.borrow() { - return conn + if !conn.timeUsed.borrow() { + // Ignore the connection that couldn't be borrowed; + // it's being closed by the idle worker and replaced by a new connection. 
+ continue } + + return conn } return nil } @@ -787,11 +791,23 @@ func (pool *ConnPool[C]) closeIdleResources(now time.Time) { for conn := s.Peek(); conn != nil; conn = conn.next.Load() { if conn.timeUsed.expired(mono, timeout) { pool.Metrics.idleClosed.Add(1) + conn.Close() + pool.closedConn() + // Using context.Background() is fine since MySQL connection already enforces // a connect timeout via the `db-connect-timeout-ms` config param. - if err := pool.connReopen(context.Background(), conn, mono); err != nil { - pool.closedConn() + c, err := pool.getNew(context.Background()) + if err != nil { + // If we couldn't open a new connection, just continue + continue + } + + // opening a new connection might have raced with other goroutines, + // so it's possible that we got back `nil` here + if c != nil { + // Return the new connection to the pool + pool.tryReturnConn(c) } } } diff --git a/go/pools/smartconnpool/pool_test.go b/go/pools/smartconnpool/pool_test.go index 666a04a7ee3..b71999af23c 100644 --- a/go/pools/smartconnpool/pool_test.go +++ b/go/pools/smartconnpool/pool_test.go @@ -1319,3 +1319,88 @@ func TestCloseDuringWaitForConn(t *testing.T) { require.EqualValues(t, 0, state.open.Load()) } } + +// TestIdleTimeoutConnectionLeak checks for leaked connections after idle timeout +func TestIdleTimeoutConnectionLeak(t *testing.T) { + var state TestState + + // Slow connection creation to ensure idle timeout happens during reopening + state.chaos.delayConnect = 300 * time.Millisecond + + p := NewPool(&Config[*TestConn]{ + Capacity: 2, + IdleTimeout: 50 * time.Millisecond, + LogWait: state.LogWait, + }).Open(newConnector(&state), nil) + + getCtx, cancel := context.WithTimeout(t.Context(), 500*time.Millisecond) + defer cancel() + + // Get and return two connections + conn1, err := p.Get(getCtx, nil) + require.NoError(t, err) + + conn2, err := p.Get(getCtx, nil) + require.NoError(t, err) + + p.put(conn1) + p.put(conn2) + + // At this point: Active=2, InUse=0, Available=2 
+ require.EqualValues(t, 2, p.Active()) + require.EqualValues(t, 0, p.InUse()) + require.EqualValues(t, 2, p.Available()) + + // Wait for idle timeout to kick in and start expiring connections + require.EventuallyWithT(t, func(c *assert.CollectT) { + // Check the actual number of currently open connections + assert.Equal(c, int64(2), state.open.Load()) + // Check the total number of closed connections + assert.Equal(c, int64(1), state.close.Load()) + }, 100*time.Millisecond, 10*time.Millisecond) + + // At this point, the idle timeout worker has expired the connections + // and is trying to reopen them (which takes 300ms due to delayConnect) + + // Try to get connections while they're being reopened + // This should trigger the bug where connections get discarded + for i := 0; i < 2; i++ { + getCtx, cancel := context.WithTimeout(t.Context(), 50*time.Millisecond) + defer cancel() + + conn, err := p.Get(getCtx, nil) + require.NoError(t, err) + + p.put(conn) + } + + // Wait a moment for all reopening to complete + require.EventuallyWithT(t, func(c *assert.CollectT) { + // Check the actual number of currently open connections + require.Equal(c, int64(2), state.open.Load()) + // Check the total number of closed connections + require.Equal(c, int64(2), state.close.Load()) + }, 400*time.Millisecond, 10*time.Millisecond) + + // Check the pool state + assert.Equal(t, int64(2), p.Active()) + assert.Equal(t, int64(0), p.InUse()) + assert.Equal(t, int64(2), p.Available()) + assert.Equal(t, int64(2), p.Metrics.IdleClosed()) + + // Try to close the pool - if there are leaked connections, this will timeout + closeCtx, cancel := context.WithTimeout(t.Context(), 500*time.Millisecond) + defer cancel() + + err = p.CloseWithContext(closeCtx) + require.NoError(t, err) + + // Pool should be completely closed now + assert.Equal(t, int64(0), p.Active()) + assert.Equal(t, int64(0), p.InUse()) + assert.Equal(t, int64(0), p.Available()) + assert.Equal(t, int64(2), p.Metrics.IdleClosed()) + + 
assert.Equal(t, int64(0), state.open.Load()) + assert.Equal(t, int64(4), state.close.Load()) +} diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go index 1ee754aa143..4943fbbf285 100644 --- a/go/test/endtoend/cluster/vttablet_process.go +++ b/go/test/endtoend/cluster/vttablet_process.go @@ -419,6 +419,20 @@ func (vttablet *VttabletProcess) TearDown() error { return vttablet.TearDownWithTimeout(vttabletStateTimeout) } +func (vttablet *VttabletProcess) Stop() { + if vttablet.proc == nil || vttablet.exit == nil { + return + } + vttablet.proc.Process.Signal(syscall.SIGSTOP) +} + +func (vttablet *VttabletProcess) Resume() { + if vttablet.proc == nil || vttablet.exit == nil { + return + } + vttablet.proc.Process.Signal(syscall.SIGCONT) +} + // Kill shuts down the running vttablet service immediately. func (vttablet *VttabletProcess) Kill() error { if vttablet.proc == nil || vttablet.exit == nil { diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index 7473bf89c9e..cfc94fb6b26 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -19,9 +19,11 @@ package emergencyreparent import ( "context" "os/exec" + "sync" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -29,6 +31,8 @@ import ( "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) func TestTrivialERS(t *testing.T) { @@ -131,6 +135,138 @@ func TestReparentDownPrimary(t *testing.T) { utils.ResurrectTablet(ctx, t, clusterInstance, tablets[0]) } +func TestEmergencyReparentWithBlockedPrimary(t *testing.T) { + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) + defer 
utils.TeardownCluster(clusterInstance) + + if clusterInstance.VtTabletMajorVersion < 24 { + t.Skip("Skipping test since `DemotePrimary` on earlier versions does not handle blocked primaries correctly") + } + + // start vtgate w/disabled buffering + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--enable_buffer=false", + "--query-timeout", "3000") + err := clusterInstance.StartVtgate() + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + conn, err := mysql.Connect(ctx, &mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + }) + require.NoError(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("CREATE TABLE test (id INT PRIMARY KEY, msg VARCHAR(64))", 0, false) + require.NoError(t, err) + + tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets + + // Simulate no semi-sync replicas being available by disabling semi-sync on all replicas + for _, tablet := range tablets[1:] { + utils.RunSQL(ctx, t, "STOP REPLICA IO_THREAD", tablet) + + // Disable semi-sync on replicas to simulate blocking + semisyncType, err := utils.SemiSyncExtensionLoaded(context.Background(), tablet) + require.NoError(t, err) + switch semisyncType { + case mysql.SemiSyncTypeSource: + utils.RunSQL(context.Background(), t, "SET GLOBAL rpl_semi_sync_replica_enabled = false", tablet) + case mysql.SemiSyncTypeMaster: + utils.RunSQL(context.Background(), t, "SET GLOBAL rpl_semi_sync_slave_enabled = false", tablet) + } + + utils.RunSQL(context.Background(), t, "START REPLICA IO_THREAD", tablet) + } + + // Try performing a write and ensure that it blocks. + writeSQL := `insert into test(id, msg) values (1, 'test 1')` + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + + // Attempt writing via vtgate against the primary. This should block (because there's no replicas to ack the semi-sync), + // and fail on the vtgate query timeout. 
Async replicas will still receive this write (probably), because it is written + // to the PRIMARY binlog even when no ackers exist. This means we need to disable the vtgate buffer (above), because it + // will attempt the write on the promoted, unblocked primary - and this will hit a dupe key error. + _, err := conn.ExecuteFetch(writeSQL, 0, false) + + // The error here could be one of: + // * target: ks.0.primary: vttablet: rpc error: code = DeadlineExceeded desc = context deadline exceeded (errno 1317) (sqlstate 70100) during query: insert into test(id, msg) values (1, 'test 1') + // * target: ks.0.primary: vttablet: rpc error: code = DeadlineExceeded desc = stream terminated by RST_STREAM with error code: CANCEL (errno 1317) (sqlstate 70100) during query: insert into test(id, msg) values (1, 'test 1') + // * ... + // + // So we only check for the part of the error message that's consistent. + require.ErrorContains(t, err, "(errno 1317) (sqlstate 70100) during query: insert into test(id, msg) values (1, 'test 1')") + + // Verify vtgate really processed the insert in case something unrelated caused the deadline exceeded. + vtgateVars := clusterInstance.VtgateProcess.GetVars() + require.NotNil(t, vtgateVars) + require.NotNil(t, vtgateVars["QueryRoutes"]) + require.NotNil(t, vtgateVars["VtgateApiErrorCounts"]) + require.EqualValues(t, map[string]interface{}{ + "DDL.DirectDDL.PRIMARY": float64(1), + "INSERT.Passthrough.PRIMARY": float64(1), + }, vtgateVars["QueryRoutes"]) + require.EqualValues(t, map[string]interface{}{ + "Execute.ks.primary.DEADLINE_EXCEEDED": float64(1), + }, vtgateVars["VtgateApiErrorCounts"]) + }() + + wg.Add(1) + waitReplicasTimeout := time.Second * 10 + go func() { + defer wg.Done() + + // Ensure the write (other goroutine above) is blocked waiting on ACKs on the primary. 
+ utils.WaitForQueryWithStateInProcesslist(context.Background(), t, tablets[0], writeSQL, "Waiting for semi-sync ACK from replica", time.Second*20) + + // Send SIGSTOP to primary to simulate it being unresponsive. + tablets[0].VttabletProcess.Stop() + + // Run forced reparent operation, this should now proceed unimpeded. + out, err := utils.Ers(clusterInstance, tablets[1], "15s", waitReplicasTimeout.String()) + require.NoError(t, err, out) + }() + + wg.Wait() + + // We need to wait at least 10 seconds here to ensure the wait-for-replicas-timeout has passed, + // before we resume the old primary - otherwise the old primary will receive a `SetReplicationSource` call. + time.Sleep(waitReplicasTimeout * 2) + + // Bring back the demoted primary + tablets[0].VttabletProcess.Resume() + + // Give the old primary some time to realize it's no longer the primary, + // and for a new primary to be promoted. + require.EventuallyWithT(t, func(c *assert.CollectT) { + // Ensure the old primary was demoted correctly + tabletInfo, err := clusterInstance.VtctldClientProcess.GetTablet(tablets[0].Alias) + require.NoError(c, err) + + // The old primary should have noticed there's a new primary tablet now and should + // have demoted itself to REPLICA. + require.Equal(c, topodatapb.TabletType_REPLICA, tabletInfo.GetType()) + + // The old primary should be in not serving mode because we should be unable to re-attach it + // as a replica due to the errant GTID caused by semi-sync writes that were never replicated out. + // + // Note: The writes that were not replicated were caused by the semi sync unblocker, which + // performed writes after ERS. + require.Equal(c, "NOT_SERVING", tablets[0].VttabletProcess.GetTabletStatus()) + require.Equal(c, "replica", tablets[0].VttabletProcess.GetTabletType()) + + // Check the 2nd tablet becomes PRIMARY. 
+ require.Equal(c, "SERVING", tablets[1].VttabletProcess.GetTabletStatus()) + require.Equal(c, "primary", tablets[1].VttabletProcess.GetTabletType()) + }, 30*time.Second, time.Second, "could not validate primary was demoted") +} + func TestReparentNoChoiceDownPrimary(t *testing.T) { clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index a91584c2582..e48f8d9657d 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -32,16 +32,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" - "vitess.io/vitess/go/vt/vttablet/tabletconn" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" + "vitess.io/vitess/go/vt/vttablet/tabletconn" ) var ( @@ -843,6 +842,7 @@ func CheckReplicationStatus(ctx context.Context, t *testing.T, tablet *cluster.V } } +// WaitForTabletToBeServing waits for a tablet to reach a serving state. func WaitForTabletToBeServing(ctx context.Context, t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, timeout time.Duration) { vTablet, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) @@ -863,3 +863,22 @@ func WaitForTabletToBeServing(ctx context.Context, t *testing.T, clusterInstance t.Fatal(err.Error()) } } + +// WaitForQueryWithStateInProcesslist waits for a query to be present in the processlist with a specific state. 
+func WaitForQueryWithStateInProcesslist(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, sql, state string, timeout time.Duration) {
+	require.Eventually(t, func() bool {
+		qr := RunSQL(ctx, t, "select Command, State, Info from information_schema.processlist", tablet)
+		for _, row := range qr.Rows {
+			if len(row) != 3 {
+				continue
+			}
+			if !strings.EqualFold(row[0].ToString(), "Query") {
+				continue
+			}
+			if strings.EqualFold(row[1].ToString(), state) && strings.EqualFold(row[2].ToString(), sql) {
+				return true
+			}
+		}
+		return false
+	}, timeout, time.Second, "query with state not in processlist")
+}
diff --git a/go/test/endtoend/vtgate/foreignkey/fk_test.go b/go/test/endtoend/vtgate/foreignkey/fk_test.go
index 23b71002fe6..cb4594db8c2 100644
--- a/go/test/endtoend/vtgate/foreignkey/fk_test.go
+++ b/go/test/endtoend/vtgate/foreignkey/fk_test.go
@@ -30,9 +30,10 @@ import (
 	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 	"vitess.io/vitess/go/vt/log"
+	"vitess.io/vitess/go/vt/vtgate/vtgateconn"
+
 	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
-	"vitess.io/vitess/go/vt/vtgate/vtgateconn"
 )
 
 // TestInsertWithFK tests that insertions work as expected when foreign key management is enabled in Vitess.
@@ -1505,6 +1506,45 @@ create table temp2(id bigint auto_increment primary key, col varchar(20) not nul
 	mcmp.ExecAllowAndCompareError(`insert into temp1(col) values('d') `, utils.CompareOptions{})
 }
 
+// TestForeignKeyWithKeyspaceQualifier tests that CREATE TABLE with foreign key references
+// that include keyspace qualifiers work correctly. This addresses bug #18889 where keyspace
+// names were not being stripped before being sent to MySQL, causing failures because MySQL
+// expects database names (vt_<keyspace>) not keyspace names.
+func TestForeignKeyWithKeyspaceQualifier(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + utils.Exec(t, mcmp.VtConn, `use uks`) + + // Create the parent table. + utils.Exec(t, mcmp.VtConn, `create table fk_parent(id bigint primary key)`) + + // Create the child table with keyspace-qualified foreign key reference. + utils.Exec(t, mcmp.VtConn, `create table fk_child(id bigint primary key, parent_id bigint, foreign key (parent_id) references uks.fk_parent(id))`) + + // Verify that the foreign key constraint works. + utils.Exec(t, mcmp.VtConn, `insert into fk_parent(id) values (1), (2)`) + utils.Exec(t, mcmp.VtConn, `insert into fk_child(id, parent_id) values (100, 1)`) + + // This should fail due to FK constraint. + _, err := utils.ExecAllowError(t, mcmp.VtConn, `insert into fk_child(id, parent_id) values (101, 999)`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + + // Test ALTER TABLE with keyspace-qualified foreign key. + utils.Exec(t, mcmp.VtConn, `create table fk_child2(id bigint primary key, parent_id bigint)`) + utils.Exec(t, mcmp.VtConn, `alter table fk_child2 add foreign key (parent_id) references uks.fk_parent(id)`) + + // Verify the constraint works for the altered table. + utils.Exec(t, mcmp.VtConn, `insert into fk_child2(id, parent_id) values (200, 2)`) + _, err = utils.ExecAllowError(t, mcmp.VtConn, `insert into fk_child2(id, parent_id) values (201, 888)`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + + // Clean up. 
+ utils.Exec(t, mcmp.VtConn, `drop table fk_child`) + utils.Exec(t, mcmp.VtConn, `drop table fk_child2`) + utils.Exec(t, mcmp.VtConn, `drop table fk_parent`) +} + // TestRestrictFkOnNonStandardKey verifies that restrict_fk_on_non_standard_key is set to off func TestRestrictFkOnNonStandardKey(t *testing.T) { mcmp, closer := start(t) diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go index 0fc1c810eb3..1fdec581dc3 100644 --- a/go/test/endtoend/vtgate/sequence/seq_test.go +++ b/go/test/endtoend/vtgate/sequence/seq_test.go @@ -224,14 +224,20 @@ func TestSeq(t *testing.T) { require.Nil(t, err) defer conn.Close() - //Initialize seq table - utils.Exec(t, conn, "insert into sequence_test_seq(id, next_id, cache) values(0,1,10)") + // Initialize seq table if needed + qr := utils.Exec(t, conn, "select count(*) from sequence_test_seq") + require.Len(t, qr.Rows, 1) + cnt, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + if cnt == 0 { + utils.Exec(t, conn, "insert into sequence_test_seq(id, next_id, cache) values(0,1,10)") + } //Insert 4 values in the main table utils.Exec(t, conn, "insert into sequence_test(val) values('a'), ('b') ,('c'), ('d')") // Test select calls to main table and verify expected id. - qr := utils.Exec(t, conn, "select id, val from sequence_test where id=4") + qr = utils.Exec(t, conn, "select id, val from sequence_test where id=4") if got, want := fmt.Sprintf("%v", qr.Rows), `[[INT64(4) VARCHAR("d")]]`; got != want { t.Errorf("select:\n%v want\n%v", got, want) } @@ -313,3 +319,137 @@ func TestInsertAllDefaults(t *testing.T) { _, err = conn.ExecuteFetch("insert into lookup_vindex () values ()", 0, false) require.Error(t, err) } + +// TestLastInsertIDWithSequence tests that LAST_INSERT_ID() returns the correct +// sequence-generated value after an INSERT in both sharded and unsharded keyspaces. 
+// This is a regression test for https://github.com/vitessio/vitess/issues/18946 +func TestLastInsertIDWithSequence(t *testing.T) { + ctx := context.Background() + + t.Run("unsharded keyspace", func(t *testing.T) { + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + DbName: unshardedKs, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // Initialize seq table if needed + qr := utils.Exec(t, conn, "select count(*) from sequence_test_seq") + require.Len(t, qr.Rows, 1) + cnt, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + if cnt == 0 { + utils.Exec(t, conn, "insert into sequence_test_seq(id, next_id, cache) values(0,1,10)") + } + + // Clean up (don't reinitialize sequence - vtgate caches values in memory) + utils.Exec(t, conn, "delete from sequence_test") + + // Insert a row - the sequence should generate an ID + utils.Exec(t, conn, "insert into sequence_test(val) values('test1')") + + // LAST_INSERT_ID() should return a non-zero sequence-generated value + qr = utils.Exec(t, conn, "select LAST_INSERT_ID()") + require.Len(t, qr.Rows, 1, "should have one row") + firstID, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.NotEqual(t, 0, firstID, "LAST_INSERT_ID() should not be 0 after INSERT with sequence") + + // Insert another row + utils.Exec(t, conn, "insert into sequence_test(val) values('test2')") + + // LAST_INSERT_ID() should return the new sequence value + qr = utils.Exec(t, conn, "select LAST_INSERT_ID()") + require.Len(t, qr.Rows, 1, "should have one row") + secondID, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.Greater(t, secondID, firstID, "second LAST_INSERT_ID() value from sequence should be greater than the first") + + // Verify the inserted rows have the expected LAST_INSERT_ID values. 
+ qr = utils.Exec(t, conn, "select id from sequence_test where val = 'test1'") + require.Len(t, qr.Rows, 1, "should have one row") + firstInsertedID, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.Equal(t, firstInsertedID, firstID, "first inserted row should have the first LAST_INSERT_ID value") + qr = utils.Exec(t, conn, "select id from sequence_test where val = 'test2'") + require.Len(t, qr.Rows, 1, "should have one row") + secondInsertedID, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.Equal(t, secondInsertedID, secondID, "second inserted row should have the secnd LAST_INSERT_ID value") + }) + + t.Run("sharded keyspace", func(t *testing.T) { + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + DbName: shardedKeyspaceName, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // Clean up + utils.Exec(t, conn, "delete from allDefaults") + + // Get the current next_id from the sequence + qr := utils.Exec(t, conn, "select next_id from uks.id_seq") + require.Len(t, qr.Rows, 1, "should have one row in id_seq") + + // Insert a row - the sequence should generate an ID + utils.Exec(t, conn, "insert into allDefaults(foo) values('bar')") + + // LAST_INSERT_ID() should return the sequence-generated value + qr = utils.Exec(t, conn, "select LAST_INSERT_ID()") + require.Len(t, qr.Rows, 1, "should have one row") + lastInsertID, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.NotEqual(t, 0, lastInsertID, "LAST_INSERT_ID() should not be 0 after INSERT with sequence in sharded keyspace") + + // Verify the inserted row has the same ID + qr = utils.Exec(t, conn, fmt.Sprintf("select id from allDefaults where id = %d", +lastInsertID)) + require.Len(t, qr.Rows, 1, "should be able to find the row by the LAST_INSERT_ID value") + lastInsertedID, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.Equal(t, lastInsertID, lastInsertedID) + }) 
+ + t.Run("within transaction", func(t *testing.T) { + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + DbName: unshardedKs, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // Note: We don't reinitialize the sequence here because vtgate caches + // sequence values in memory. Instead, we just verify the behavior + // works correctly within a transaction regardless of the actual value. + + // Start a transaction + utils.Exec(t, conn, "begin") + + // Insert a row + utils.Exec(t, conn, "insert into sequence_test(val) values('txtest')") + + // LAST_INSERT_ID() should work within the transaction and return non-zero + qr := utils.Exec(t, conn, "select LAST_INSERT_ID()") + require.Len(t, qr.Rows, 1, "should have one row") + lastInsertIDInTx, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.NotEqual(t, 0, lastInsertIDInTx, "LAST_INSERT_ID() should not be 0 within transaction") + + utils.Exec(t, conn, "commit") + + // LAST_INSERT_ID() should still return the same value after commit + qr = utils.Exec(t, conn, "select LAST_INSERT_ID()") + require.Len(t, qr.Rows, 1, "should have one row") + lastInsertIDAfterCommit, err := qr.Rows[0][0].ToInt() + require.NoError(t, err) + assert.Equal(t, lastInsertIDInTx, lastInsertIDAfterCommit, "LAST_INSERT_ID() should persist after commit") + }) +} diff --git a/go/tools/go-upgrade/go-upgrade.go b/go/tools/go-upgrade/go-upgrade.go index 0083941d600..27f23e2a6dc 100644 --- a/go/tools/go-upgrade/go-upgrade.go +++ b/go/tools/go-upgrade/go-upgrade.go @@ -346,6 +346,7 @@ func replaceGoVersionInCodebase(old, new *version.Version) error { "./docker/lite/Dockerfile.mysql80", "./docker/lite/Dockerfile.mysql84", "./docker/lite/Dockerfile.percona80", + "./docker/lite/Dockerfile.percona84", "./docker/vttestserver/Dockerfile.mysql80", "./docker/vttestserver/Dockerfile.mysql84", } diff --git a/go/vt/dbconfigs/credentials.go 
b/go/vt/dbconfigs/credentials.go index f7082738c5e..ecc094b9cc0 100644 --- a/go/vt/dbconfigs/credentials.go +++ b/go/vt/dbconfigs/credentials.go @@ -60,6 +60,7 @@ var ( "mysqlctl", "mysqlctld", "vtbackup", + "vtbench", "vtcombo", "vttablet", } diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go index ed2cb21527b..c3263bc8e7d 100644 --- a/go/vt/discovery/replicationlag.go +++ b/go/vt/discovery/replicationlag.go @@ -57,7 +57,7 @@ var ( "discovery_legacy_replication_lag_algorithm", viperutil.Options[bool]{ FlagName: "legacy-replication-lag-algorithm", - Default: true, + Default: false, }, ) ) @@ -70,7 +70,7 @@ func registerReplicationFlags(fs *pflag.FlagSet) { fs.Duration("discovery-low-replication-lag", lowReplicationLag.Default(), "Threshold below which replication lag is considered low enough to be healthy.") fs.Duration("discovery-high-replication-lag-minimum-serving", highReplicationLagMinServing.Default(), "Threshold above which replication lag is considered too high when applying the min_number_serving_vttablets flag.") fs.Int("min-number-serving-vttablets", minNumTablets.Default(), "The minimum number of vttablets for each replicating tablet_type (e.g. 
replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving.") - fs.Bool("legacy-replication-lag-algorithm", legacyReplicationLagAlgorithm.Default(), "Use the legacy algorithm when selecting vttablets for serving.") + fs.Bool("legacy-replication-lag-algorithm", legacyReplicationLagAlgorithm.Default(), "(DEPRECATED) Use the legacy algorithm when selecting vttablets for serving.") viperutil.BindFlags(fs, lowReplicationLag, diff --git a/go/vt/mysqlctl/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go index 4aca4f726bd..fe94486dacd 100644 --- a/go/vt/mysqlctl/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon.go @@ -769,6 +769,11 @@ func (fmd *FakeMysqlDaemon) SemiSyncReplicationStatus(ctx context.Context) (bool return fmd.SemiSyncReplicaEnabled, nil } +// IsSemiSyncBlocked is part of the MysqlDaemon interface. +func (fmd *FakeMysqlDaemon) IsSemiSyncBlocked(ctx context.Context) (bool, error) { + return false, nil +} + // GetVersionString is part of the MysqlDaemon interface. 
func (fmd *FakeMysqlDaemon) GetVersionString(ctx context.Context) (string, error) { return fmd.Version, nil diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 5851a980664..41896abb785 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -74,6 +74,7 @@ type MysqlDaemon interface { SemiSyncClients(ctx context.Context) (count uint32) SemiSyncSettings(ctx context.Context) (timeout uint64, numReplicas uint32) SemiSyncReplicationStatus(ctx context.Context) (bool, error) + IsSemiSyncBlocked(ctx context.Context) (bool, error) ResetReplicationParameters(ctx context.Context) error GetBinlogInformation(ctx context.Context) (binlogFormat string, logEnabled bool, logReplicaUpdate bool, binlogRowImage string, err error) GetGTIDMode(ctx context.Context) (gtidMode string, err error) diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index ce0a4890782..f1bddacfc02 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -819,3 +819,30 @@ func (mysqld *Mysqld) SemiSyncExtensionLoaded(ctx context.Context) (mysql.SemiSy return conn.Conn.SemiSyncExtensionLoaded() } + +func (mysqld *Mysqld) IsSemiSyncBlocked(ctx context.Context) (bool, error) { + conn, err := getPoolReconnect(ctx, mysqld.dbaPool) + if err != nil { + return false, err + } + defer conn.Recycle() + + // Execute the query to check if the primary is blocked on semi-sync. + semiSyncWaitSessionsRead := "select variable_value from performance_schema.global_status where regexp_like(variable_name, 'Rpl_semi_sync_(source|master)_wait_sessions')" + res, err := conn.Conn.ExecuteFetch(semiSyncWaitSessionsRead, 1, false) + if err != nil { + return false, err + } + // If we have no rows, then the primary doesn't have semi-sync enabled. + // It then follows, that the primary isn't blocked :) + if len(res.Rows) == 0 { + return false, nil + } + + // Read the status value and check if it is non-zero. 
+ if len(res.Rows) != 1 || len(res.Rows[0]) != 1 { + return false, fmt.Errorf("unexpected number of rows received - %v", res.Rows) + } + value, err := res.Rows[0][0].ToCastInt64() + return value != 0, err +} diff --git a/go/vt/proto/querythrottler/querythrottler.pb.go b/go/vt/proto/querythrottler/querythrottler.pb.go new file mode 100644 index 00000000000..b9760bdfeaf --- /dev/null +++ b/go/vt/proto/querythrottler/querythrottler.pb.go @@ -0,0 +1,476 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.9 +// protoc v3.21.3 +// source: querythrottler.proto + +package querythrottler + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// ThrottlingStrategy represents the strategy used to apply throttling +type ThrottlingStrategy int32 + +const ( + ThrottlingStrategy_UNKNOWN ThrottlingStrategy = 0 + ThrottlingStrategy_TABLET_THROTTLER ThrottlingStrategy = 1 +) + +// Enum value maps for ThrottlingStrategy. 
+var ( + ThrottlingStrategy_name = map[int32]string{ + 0: "UNKNOWN", + 1: "TABLET_THROTTLER", + } + ThrottlingStrategy_value = map[string]int32{ + "UNKNOWN": 0, + "TABLET_THROTTLER": 1, + } +) + +func (x ThrottlingStrategy) Enum() *ThrottlingStrategy { + p := new(ThrottlingStrategy) + *p = x + return p +} + +func (x ThrottlingStrategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ThrottlingStrategy) Descriptor() protoreflect.EnumDescriptor { + return file_querythrottler_proto_enumTypes[0].Descriptor() +} + +func (ThrottlingStrategy) Type() protoreflect.EnumType { + return &file_querythrottler_proto_enumTypes[0] +} + +func (x ThrottlingStrategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ThrottlingStrategy.Descriptor instead. +func (ThrottlingStrategy) EnumDescriptor() ([]byte, []int) { + return file_querythrottler_proto_rawDescGZIP(), []int{0} +} + +// Config defines the runtime configuration for the IncomingQueryThrottler +type Config struct { + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Strategy ThrottlingStrategy `protobuf:"varint,2,opt,name=strategy,proto3,enum=querythrottler.ThrottlingStrategy" json:"strategy,omitempty"` + TabletStrategyConfig *TabletStrategyConfig `protobuf:"bytes,3,opt,name=tablet_strategy_config,json=tabletStrategyConfig,proto3" json:"tablet_strategy_config,omitempty"` + DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Config) Reset() { + *x = Config{} + mi := &file_querythrottler_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Config) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Config) ProtoMessage() {} + 
+func (x *Config) ProtoReflect() protoreflect.Message { + mi := &file_querythrottler_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Config.ProtoReflect.Descriptor instead. +func (*Config) Descriptor() ([]byte, []int) { + return file_querythrottler_proto_rawDescGZIP(), []int{0} +} + +func (x *Config) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *Config) GetStrategy() ThrottlingStrategy { + if x != nil { + return x.Strategy + } + return ThrottlingStrategy_UNKNOWN +} + +func (x *Config) GetTabletStrategyConfig() *TabletStrategyConfig { + if x != nil { + return x.TabletStrategyConfig + } + return nil +} + +func (x *Config) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +// TabletStrategyConfig holds per-tablet-type throttling rules +type TabletStrategyConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + TabletRules map[string]*StatementRuleSet `protobuf:"bytes,1,rep,name=tablet_rules,json=tabletRules,proto3" json:"tablet_rules,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TabletStrategyConfig) Reset() { + *x = TabletStrategyConfig{} + mi := &file_querythrottler_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TabletStrategyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TabletStrategyConfig) ProtoMessage() {} + +func (x *TabletStrategyConfig) ProtoReflect() protoreflect.Message { + mi := &file_querythrottler_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use TabletStrategyConfig.ProtoReflect.Descriptor instead. +func (*TabletStrategyConfig) Descriptor() ([]byte, []int) { + return file_querythrottler_proto_rawDescGZIP(), []int{1} +} + +func (x *TabletStrategyConfig) GetTabletRules() map[string]*StatementRuleSet { + if x != nil { + return x.TabletRules + } + return nil +} + +// StatementRuleSet maps SQL statement types to metric rules +type StatementRuleSet struct { + state protoimpl.MessageState `protogen:"open.v1"` + StatementRules map[string]*MetricRuleSet `protobuf:"bytes,1,rep,name=statement_rules,json=statementRules,proto3" json:"statement_rules,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatementRuleSet) Reset() { + *x = StatementRuleSet{} + mi := &file_querythrottler_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatementRuleSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatementRuleSet) ProtoMessage() {} + +func (x *StatementRuleSet) ProtoReflect() protoreflect.Message { + mi := &file_querythrottler_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatementRuleSet.ProtoReflect.Descriptor instead. 
+func (*StatementRuleSet) Descriptor() ([]byte, []int) { + return file_querythrottler_proto_rawDescGZIP(), []int{2} +} + +func (x *StatementRuleSet) GetStatementRules() map[string]*MetricRuleSet { + if x != nil { + return x.StatementRules + } + return nil +} + +// MetricRuleSet maps metric names to their throttling rules +type MetricRuleSet struct { + state protoimpl.MessageState `protogen:"open.v1"` + MetricRules map[string]*MetricRule `protobuf:"bytes,1,rep,name=metric_rules,json=metricRules,proto3" json:"metric_rules,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MetricRuleSet) Reset() { + *x = MetricRuleSet{} + mi := &file_querythrottler_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MetricRuleSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricRuleSet) ProtoMessage() {} + +func (x *MetricRuleSet) ProtoReflect() protoreflect.Message { + mi := &file_querythrottler_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricRuleSet.ProtoReflect.Descriptor instead. 
+func (*MetricRuleSet) Descriptor() ([]byte, []int) { + return file_querythrottler_proto_rawDescGZIP(), []int{3} +} + +func (x *MetricRuleSet) GetMetricRules() map[string]*MetricRule { + if x != nil { + return x.MetricRules + } + return nil +} + +// MetricRule defines how to throttle based on a specific metric +type MetricRule struct { + state protoimpl.MessageState `protogen:"open.v1"` + Thresholds []*ThrottleThreshold `protobuf:"bytes,1,rep,name=thresholds,proto3" json:"thresholds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MetricRule) Reset() { + *x = MetricRule{} + mi := &file_querythrottler_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MetricRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricRule) ProtoMessage() {} + +func (x *MetricRule) ProtoReflect() protoreflect.Message { + mi := &file_querythrottler_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricRule.ProtoReflect.Descriptor instead. 
+func (*MetricRule) Descriptor() ([]byte, []int) { + return file_querythrottler_proto_rawDescGZIP(), []int{4} +} + +func (x *MetricRule) GetThresholds() []*ThrottleThreshold { + if x != nil { + return x.Thresholds + } + return nil +} + +// ThrottleThreshold defines a condition for throttling +type ThrottleThreshold struct { + state protoimpl.MessageState `protogen:"open.v1"` + Above float64 `protobuf:"fixed64,1,opt,name=above,proto3" json:"above,omitempty"` + Throttle int32 `protobuf:"varint,2,opt,name=throttle,proto3" json:"throttle,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ThrottleThreshold) Reset() { + *x = ThrottleThreshold{} + mi := &file_querythrottler_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ThrottleThreshold) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ThrottleThreshold) ProtoMessage() {} + +func (x *ThrottleThreshold) ProtoReflect() protoreflect.Message { + mi := &file_querythrottler_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ThrottleThreshold.ProtoReflect.Descriptor instead. 
+func (*ThrottleThreshold) Descriptor() ([]byte, []int) { + return file_querythrottler_proto_rawDescGZIP(), []int{5} +} + +func (x *ThrottleThreshold) GetAbove() float64 { + if x != nil { + return x.Above + } + return 0 +} + +func (x *ThrottleThreshold) GetThrottle() int32 { + if x != nil { + return x.Throttle + } + return 0 +} + +var File_querythrottler_proto protoreflect.FileDescriptor + +const file_querythrottler_proto_rawDesc = "" + + "\n" + + "\x14querythrottler.proto\x12\x0equerythrottler\"\xd7\x01\n" + + "\x06Config\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\x12>\n" + + "\bstrategy\x18\x02 \x01(\x0e2\".querythrottler.ThrottlingStrategyR\bstrategy\x12Z\n" + + "\x16tablet_strategy_config\x18\x03 \x01(\v2$.querythrottler.TabletStrategyConfigR\x14tabletStrategyConfig\x12\x17\n" + + "\adry_run\x18\x04 \x01(\bR\x06dryRun\"\xd2\x01\n" + + "\x14TabletStrategyConfig\x12X\n" + + "\ftablet_rules\x18\x01 \x03(\v25.querythrottler.TabletStrategyConfig.TabletRulesEntryR\vtabletRules\x1a`\n" + + "\x10TabletRulesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x126\n" + + "\x05value\x18\x02 \x01(\v2 .querythrottler.StatementRuleSetR\x05value:\x028\x01\"\xd3\x01\n" + + "\x10StatementRuleSet\x12]\n" + + "\x0fstatement_rules\x18\x01 \x03(\v24.querythrottler.StatementRuleSet.StatementRulesEntryR\x0estatementRules\x1a`\n" + + "\x13StatementRulesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x123\n" + + "\x05value\x18\x02 \x01(\v2\x1d.querythrottler.MetricRuleSetR\x05value:\x028\x01\"\xbe\x01\n" + + "\rMetricRuleSet\x12Q\n" + + "\fmetric_rules\x18\x01 \x03(\v2..querythrottler.MetricRuleSet.MetricRulesEntryR\vmetricRules\x1aZ\n" + + "\x10MetricRulesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x120\n" + + "\x05value\x18\x02 \x01(\v2\x1a.querythrottler.MetricRuleR\x05value:\x028\x01\"O\n" + + "\n" + + "MetricRule\x12A\n" + + "\n" + + "thresholds\x18\x01 \x03(\v2!.querythrottler.ThrottleThresholdR\n" + + "thresholds\"E\n" + + 
"\x11ThrottleThreshold\x12\x14\n" + + "\x05above\x18\x01 \x01(\x01R\x05above\x12\x1a\n" + + "\bthrottle\x18\x02 \x01(\x05R\bthrottle*7\n" + + "\x12ThrottlingStrategy\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\x14\n" + + "\x10TABLET_THROTTLER\x10\x01B-Z+vitess.io/vitess/go/vt/proto/querythrottlerb\x06proto3" + +var ( + file_querythrottler_proto_rawDescOnce sync.Once + file_querythrottler_proto_rawDescData []byte +) + +func file_querythrottler_proto_rawDescGZIP() []byte { + file_querythrottler_proto_rawDescOnce.Do(func() { + file_querythrottler_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_querythrottler_proto_rawDesc), len(file_querythrottler_proto_rawDesc))) + }) + return file_querythrottler_proto_rawDescData +} + +var file_querythrottler_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_querythrottler_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_querythrottler_proto_goTypes = []any{ + (ThrottlingStrategy)(0), // 0: querythrottler.ThrottlingStrategy + (*Config)(nil), // 1: querythrottler.Config + (*TabletStrategyConfig)(nil), // 2: querythrottler.TabletStrategyConfig + (*StatementRuleSet)(nil), // 3: querythrottler.StatementRuleSet + (*MetricRuleSet)(nil), // 4: querythrottler.MetricRuleSet + (*MetricRule)(nil), // 5: querythrottler.MetricRule + (*ThrottleThreshold)(nil), // 6: querythrottler.ThrottleThreshold + nil, // 7: querythrottler.TabletStrategyConfig.TabletRulesEntry + nil, // 8: querythrottler.StatementRuleSet.StatementRulesEntry + nil, // 9: querythrottler.MetricRuleSet.MetricRulesEntry +} +var file_querythrottler_proto_depIdxs = []int32{ + 0, // 0: querythrottler.Config.strategy:type_name -> querythrottler.ThrottlingStrategy + 2, // 1: querythrottler.Config.tablet_strategy_config:type_name -> querythrottler.TabletStrategyConfig + 7, // 2: querythrottler.TabletStrategyConfig.tablet_rules:type_name -> querythrottler.TabletStrategyConfig.TabletRulesEntry + 8, // 3: 
querythrottler.StatementRuleSet.statement_rules:type_name -> querythrottler.StatementRuleSet.StatementRulesEntry + 9, // 4: querythrottler.MetricRuleSet.metric_rules:type_name -> querythrottler.MetricRuleSet.MetricRulesEntry + 6, // 5: querythrottler.MetricRule.thresholds:type_name -> querythrottler.ThrottleThreshold + 3, // 6: querythrottler.TabletStrategyConfig.TabletRulesEntry.value:type_name -> querythrottler.StatementRuleSet + 4, // 7: querythrottler.StatementRuleSet.StatementRulesEntry.value:type_name -> querythrottler.MetricRuleSet + 5, // 8: querythrottler.MetricRuleSet.MetricRulesEntry.value:type_name -> querythrottler.MetricRule + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_querythrottler_proto_init() } +func file_querythrottler_proto_init() { + if File_querythrottler_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_querythrottler_proto_rawDesc), len(file_querythrottler_proto_rawDesc)), + NumEnums: 1, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_querythrottler_proto_goTypes, + DependencyIndexes: file_querythrottler_proto_depIdxs, + EnumInfos: file_querythrottler_proto_enumTypes, + MessageInfos: file_querythrottler_proto_msgTypes, + }.Build() + File_querythrottler_proto = out.File + file_querythrottler_proto_goTypes = nil + file_querythrottler_proto_depIdxs = nil +} diff --git a/go/vt/proto/querythrottler/querythrottler_vtproto.pb.go b/go/vt/proto/querythrottler/querythrottler_vtproto.pb.go new file mode 100644 index 00000000000..fdae8651b95 --- /dev/null +++ b/go/vt/proto/querythrottler/querythrottler_vtproto.pb.go @@ -0,0 
+1,1451 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.1-0.20250313105119-ba97887b0a25 +// source: querythrottler.proto + +package querythrottler + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Config) CloneVT() *Config { + if m == nil { + return (*Config)(nil) + } + r := new(Config) + r.Enabled = m.Enabled + r.Strategy = m.Strategy + r.TabletStrategyConfig = m.TabletStrategyConfig.CloneVT() + r.DryRun = m.DryRun + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Config) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletStrategyConfig) CloneVT() *TabletStrategyConfig { + if m == nil { + return (*TabletStrategyConfig)(nil) + } + r := new(TabletStrategyConfig) + if rhs := m.TabletRules; rhs != nil { + tmpContainer := make(map[string]*StatementRuleSet, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletRules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletStrategyConfig) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StatementRuleSet) CloneVT() *StatementRuleSet { + if m == nil { + return (*StatementRuleSet)(nil) + } + r := new(StatementRuleSet) + if rhs := m.StatementRules; rhs != nil { + tmpContainer := make(map[string]*MetricRuleSet, 
len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.StatementRules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StatementRuleSet) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MetricRuleSet) CloneVT() *MetricRuleSet { + if m == nil { + return (*MetricRuleSet)(nil) + } + r := new(MetricRuleSet) + if rhs := m.MetricRules; rhs != nil { + tmpContainer := make(map[string]*MetricRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.MetricRules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MetricRuleSet) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MetricRule) CloneVT() *MetricRule { + if m == nil { + return (*MetricRule)(nil) + } + r := new(MetricRule) + if rhs := m.Thresholds; rhs != nil { + tmpContainer := make([]*ThrottleThreshold, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Thresholds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MetricRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ThrottleThreshold) CloneVT() *ThrottleThreshold { + if m == nil { + return (*ThrottleThreshold)(nil) + } + r := new(ThrottleThreshold) + r.Above = m.Above + r.Throttle = m.Throttle + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ThrottleThreshold) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Config) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Config) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Config) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.TabletStrategyConfig != nil { + size, err := m.TabletStrategyConfig.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Strategy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Strategy)) + i-- + dAtA[i] = 0x10 + } + if m.Enabled { + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TabletStrategyConfig) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TabletStrategyConfig) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TabletStrategyConfig) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TabletRules) > 0 { + for k := range m.TabletRules { + v := m.TabletRules[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StatementRuleSet) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatementRuleSet) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StatementRuleSet) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.StatementRules) > 0 { + for k := range m.StatementRules { + v := m.StatementRules[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricRuleSet) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricRuleSet) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MetricRuleSet) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.MetricRules) > 0 { + for k := range m.MetricRules { + v := m.MetricRules[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricRule) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricRule) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MetricRule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Thresholds) > 0 { + for iNdEx := len(m.Thresholds) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Thresholds[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ThrottleThreshold) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ThrottleThreshold) MarshalToVT(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ThrottleThreshold) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Throttle != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Throttle)) + i-- + dAtA[i] = 0x10 + } + if m.Above != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Above)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Config) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Enabled { + n += 2 + } + if m.Strategy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Strategy)) + } + if m.TabletStrategyConfig != nil { + l = m.TabletStrategyConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *TabletStrategyConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TabletRules) > 0 { + for k, v := range m.TabletRules { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *StatementRuleSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StatementRules) > 0 { + for k, v := range m.StatementRules { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MetricRuleSet) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MetricRules) > 0 { + for k, v := range m.MetricRules { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MetricRule) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Thresholds) > 0 { + for _, e := range m.Thresholds { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ThrottleThreshold) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Above != 0 { + n += 9 + } + if m.Throttle != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Throttle)) + } + n += len(m.unknownFields) + return n +} + +func (m *Config) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + m.Enabled = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + m.Strategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Strategy |= ThrottlingStrategy(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletStrategyConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletStrategyConfig == nil { + m.TabletStrategyConfig = &TabletStrategyConfig{} + } + if err := m.TabletStrategyConfig.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TabletStrategyConfig) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TabletStrategyConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TabletStrategyConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletRules == nil { + m.TabletRules = make(map[string]*StatementRuleSet) + } + var mapkey string + var mapvalue *StatementRuleSet + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StatementRuleSet{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.TabletRules[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatementRuleSet) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatementRuleSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatementRuleSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatementRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StatementRules == nil { + m.StatementRules = make(map[string]*MetricRuleSet) + } + var mapkey string + var mapvalue *MetricRuleSet + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricRuleSet{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.StatementRules[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricRuleSet) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricRuleSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricRuleSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricRules == nil { + m.MetricRules = make(map[string]*MetricRule) + } + var mapkey string + var mapvalue *MetricRule + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricRule{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricRules[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricRule) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Thresholds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Thresholds = append(m.Thresholds, &ThrottleThreshold{}) + if err := m.Thresholds[len(m.Thresholds)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThrottleThreshold) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThrottleThreshold: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThrottleThreshold: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Above", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Above = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttle", wireType) + } + m.Throttle = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Throttle |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index d7047b925d0..6cad59cd6fe 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -4460,6 +4460,7 @@ func (*InitReplicaResponse) Descriptor() ([]byte, []int) { type DemotePrimaryRequest struct { state protoimpl.MessageState `protogen:"open.v1"` + Force bool `protobuf:"varint,1,opt,name=force,proto3" json:"force,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -4494,6 +4495,13 @@ func (*DemotePrimaryRequest) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{92} } +func (x *DemotePrimaryRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + type DemotePrimaryResponse struct { state protoimpl.MessageState `protogen:"open.v1"` // PrimaryStatus represents the response from calling `SHOW BINARY LOG STATUS` on a primary that has been demoted. 
@@ -8684,8 +8692,9 @@ const file_tabletmanagerdata_proto_rawDesc = "" + "\x14replication_position\x18\x02 \x01(\tR\x13replicationPosition\x12&\n" + "\x0ftime_created_ns\x18\x03 \x01(\x03R\rtimeCreatedNs\x12\x1a\n" + "\bsemiSync\x18\x04 \x01(\bR\bsemiSync\"\x15\n" + - "\x13InitReplicaResponse\"\x16\n" + - "\x14DemotePrimaryRequest\"d\n" + + "\x13InitReplicaResponse\",\n" + + "\x14DemotePrimaryRequest\x12\x14\n" + + "\x05force\x18\x01 \x01(\bR\x05force\"d\n" + "\x15DemotePrimaryResponse\x12E\n" + "\x0eprimary_status\x18\x02 \x01(\v2\x1e.replicationdata.PrimaryStatusR\rprimaryStatusJ\x04\b\x01\x10\x02\"6\n" + "\x18UndoDemotePrimaryRequest\x12\x1a\n" + diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go index 0ef58ee633e..baf50f623e6 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go @@ -1748,6 +1748,7 @@ func (m *DemotePrimaryRequest) CloneVT() *DemotePrimaryRequest { return (*DemotePrimaryRequest)(nil) } r := new(DemotePrimaryRequest) + r.Force = m.Force if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -7270,6 +7271,16 @@ func (m *DemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -12566,6 +12577,9 @@ func (m *DemotePrimaryRequest) SizeVT() (n int) { } var l int _ = l + if m.Force { + n += 2 + } n += len(m.unknownFields) return n } @@ -22601,6 +22615,26 @@ func (m *DemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { return fmt.Errorf("proto: DemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index cdbc9c31e15..5825ef76a44 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -32,6 +32,7 @@ import ( reflect "reflect" sync "sync" unsafe "unsafe" + querythrottler "vitess.io/vitess/go/vt/proto/querythrottler" vtorcdata "vitess.io/vitess/go/vt/proto/vtorcdata" vttime "vitess.io/vitess/go/vt/proto/vttime" ) @@ -676,9 +677,11 @@ type Keyspace struct { // tablet's mysqld instance. SidecarDbName string `protobuf:"bytes,10,opt,name=sidecar_db_name,json=sidecarDbName,proto3" json:"sidecar_db_name,omitempty"` // Vtorc is the vtorc keyspace config/state for the keyspace. - VtorcState *vtorcdata.Keyspace `protobuf:"bytes,11,opt,name=vtorc_state,json=vtorcState,proto3" json:"vtorc_state,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + VtorcState *vtorcdata.Keyspace `protobuf:"bytes,11,opt,name=vtorc_state,json=vtorcState,proto3" json:"vtorc_state,omitempty"` + // QueryThrottler provides a flexible throttling configuration that supports multiple throttling strategies beyond the standard tablet throttling. 
+ QueryThrottlerConfig *querythrottler.Config `protobuf:"bytes,20000,opt,name=query_throttler_config,json=queryThrottlerConfig,proto3" json:"query_throttler_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Keyspace) Reset() { @@ -760,6 +763,13 @@ func (x *Keyspace) GetVtorcState() *vtorcdata.Keyspace { return nil } +func (x *Keyspace) GetQueryThrottlerConfig() *querythrottler.Config { + if x != nil { + return x.QueryThrottlerConfig + } + return nil +} + // ShardReplication describes the MySQL replication relationships // whithin a cell. type ShardReplication struct { @@ -1169,8 +1179,10 @@ type SrvKeyspace struct { // shards and tablets. This is copied from the global keyspace // object. ThrottlerConfig *ThrottlerConfig `protobuf:"bytes,6,opt,name=throttler_config,json=throttlerConfig,proto3" json:"throttler_config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // QueryThrottler provides a flexible throttling configuration that supports multiple throttling strategies beyond the standard tablet throttling. + QueryThrottlerConfig *querythrottler.Config `protobuf:"bytes,20000,opt,name=query_throttler_config,json=queryThrottlerConfig,proto3" json:"query_throttler_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SrvKeyspace) Reset() { @@ -1217,6 +1229,13 @@ func (x *SrvKeyspace) GetThrottlerConfig() *ThrottlerConfig { return nil } +func (x *SrvKeyspace) GetQueryThrottlerConfig() *querythrottler.Config { + if x != nil { + return x.QueryThrottlerConfig + } + return nil +} + // CellInfo contains information about a cell. CellInfo objects are // stored in the global topology server, and describe how to reach // local topology servers. 
@@ -1785,7 +1804,7 @@ var File_topodata_proto protoreflect.FileDescriptor const file_topodata_proto_rawDesc = "" + "\n" + - "\x0etopodata.proto\x12\btopodata\x1a\x0fvtorcdata.proto\x1a\fvttime.proto\"2\n" + + "\x0etopodata.proto\x12\btopodata\x1a\x0fvtorcdata.proto\x1a\fvttime.proto\x1a\x14querythrottler.proto\"2\n" + "\bKeyRange\x12\x14\n" + "\x05start\x18\x01 \x01(\fR\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\fR\x03end\"3\n" + @@ -1834,7 +1853,7 @@ const file_topodata_proto_rawDesc = "" + "tabletType\x12\x14\n" + "\x05cells\x18\x02 \x03(\tR\x05cells\x12#\n" + "\rdenied_tables\x18\x04 \x03(\tR\fdeniedTables\x12\x16\n" + - "\x06frozen\x18\x05 \x01(\bR\x06frozenJ\x04\b\x03\x10\x04J\x04\b\x03\x10\x04J\x04\b\x05\x10\x06\"\x88\x03\n" + + "\x06frozen\x18\x05 \x01(\bR\x06frozenJ\x04\b\x03\x10\x04J\x04\b\x03\x10\x04J\x04\b\x05\x10\x06\"\xd8\x03\n" + "\bKeyspace\x12;\n" + "\rkeyspace_type\x18\x05 \x01(\x0e2\x16.topodata.KeyspaceTypeR\fkeyspaceType\x12#\n" + "\rbase_keyspace\x18\x06 \x01(\tR\fbaseKeyspace\x121\n" + @@ -1844,7 +1863,8 @@ const file_topodata_proto_rawDesc = "" + "\x0fsidecar_db_name\x18\n" + " \x01(\tR\rsidecarDbName\x124\n" + "\vvtorc_state\x18\v \x01(\v2\x13.vtorcdata.KeyspaceR\n" + - "vtorcStateJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03J\x04\b\x03\x10\x04J\x04\b\x04\x10\x05\"\x8b\x01\n" + + "vtorcState\x12N\n" + + "\x16query_throttler_config\x18\xa0\x9c\x01 \x01(\v2\x16.querythrottler.ConfigR\x14queryThrottlerConfigJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03J\x04\b\x03\x10\x04J\x04\b\x04\x10\x05\"\x8b\x01\n" + "\x10ShardReplication\x125\n" + "\x05nodes\x18\x01 \x03(\v2\x1f.topodata.ShardReplication.NodeR\x05nodes\x1a@\n" + "\x04Node\x128\n" + @@ -1887,12 +1907,13 @@ const file_topodata_proto_rawDesc = "" + "\x05value\x18\x02 \x01(\v2%.topodata.ThrottlerConfig.MetricNamesR\x05value:\x028\x01\x1aC\n" + "\x15MetricThresholdsEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\x01R\x05value:\x028\x01\"\x98\x03\n" + + 
"\x05value\x18\x02 \x01(\x01R\x05value:\x028\x01\"\xe8\x03\n" + "\vSrvKeyspace\x12G\n" + "\n" + "partitions\x18\x01 \x03(\v2'.topodata.SrvKeyspace.KeyspacePartitionR\n" + "partitions\x12D\n" + - "\x10throttler_config\x18\x06 \x01(\v2\x19.topodata.ThrottlerConfigR\x0fthrottlerConfig\x1a\xe1\x01\n" + + "\x10throttler_config\x18\x06 \x01(\v2\x19.topodata.ThrottlerConfigR\x0fthrottlerConfig\x12N\n" + + "\x16query_throttler_config\x18\xa0\x9c\x01 \x01(\v2\x16.querythrottler.ConfigR\x14queryThrottlerConfig\x1a\xe1\x01\n" + "\x11KeyspacePartition\x125\n" + "\vserved_type\x18\x01 \x01(\x0e2\x14.topodata.TabletTypeR\n" + "servedType\x12C\n" + @@ -1984,6 +2005,7 @@ var file_topodata_proto_goTypes = []any{ (*vttime.Time)(nil), // 30: vttime.Time (*vtorcdata.Shard)(nil), // 31: vtorcdata.Shard (*vtorcdata.Keyspace)(nil), // 32: vtorcdata.Keyspace + (*querythrottler.Config)(nil), // 33: querythrottler.Config } var file_topodata_proto_depIdxs = []int32{ 4, // 0: topodata.Tablet.alias:type_name -> topodata.TabletAlias @@ -2002,32 +2024,34 @@ var file_topodata_proto_depIdxs = []int32{ 30, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time 13, // 14: topodata.Keyspace.throttler_config:type_name -> topodata.ThrottlerConfig 32, // 15: topodata.Keyspace.vtorc_state:type_name -> vtorcdata.Keyspace - 24, // 16: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node - 2, // 17: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type - 4, // 18: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias - 3, // 19: topodata.ShardReference.key_range:type_name -> topodata.KeyRange - 3, // 20: topodata.ShardTabletControl.key_range:type_name -> topodata.KeyRange - 30, // 21: topodata.ThrottledAppRule.expires_at:type_name -> vttime.Time - 25, // 22: topodata.ThrottlerConfig.throttled_apps:type_name -> topodata.ThrottlerConfig.ThrottledAppsEntry - 27, // 23: topodata.ThrottlerConfig.app_checked_metrics:type_name 
-> topodata.ThrottlerConfig.AppCheckedMetricsEntry - 28, // 24: topodata.ThrottlerConfig.metric_thresholds:type_name -> topodata.ThrottlerConfig.MetricThresholdsEntry - 29, // 25: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition - 13, // 26: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig - 17, // 27: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig - 18, // 28: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster - 3, // 29: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange - 1, // 30: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType - 4, // 31: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias - 12, // 32: topodata.ThrottlerConfig.ThrottledAppsEntry.value:type_name -> topodata.ThrottledAppRule - 26, // 33: topodata.ThrottlerConfig.AppCheckedMetricsEntry.value:type_name -> topodata.ThrottlerConfig.MetricNames - 1, // 34: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType - 10, // 35: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference - 11, // 36: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl - 37, // [37:37] is the sub-list for method output_type - 37, // [37:37] is the sub-list for method input_type - 37, // [37:37] is the sub-list for extension type_name - 37, // [37:37] is the sub-list for extension extendee - 0, // [0:37] is the sub-list for field type_name + 33, // 16: topodata.Keyspace.query_throttler_config:type_name -> querythrottler.Config + 24, // 17: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node + 2, // 18: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type + 4, // 19: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias + 3, // 20: 
topodata.ShardReference.key_range:type_name -> topodata.KeyRange + 3, // 21: topodata.ShardTabletControl.key_range:type_name -> topodata.KeyRange + 30, // 22: topodata.ThrottledAppRule.expires_at:type_name -> vttime.Time + 25, // 23: topodata.ThrottlerConfig.throttled_apps:type_name -> topodata.ThrottlerConfig.ThrottledAppsEntry + 27, // 24: topodata.ThrottlerConfig.app_checked_metrics:type_name -> topodata.ThrottlerConfig.AppCheckedMetricsEntry + 28, // 25: topodata.ThrottlerConfig.metric_thresholds:type_name -> topodata.ThrottlerConfig.MetricThresholdsEntry + 29, // 26: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition + 13, // 27: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig + 33, // 28: topodata.SrvKeyspace.query_throttler_config:type_name -> querythrottler.Config + 17, // 29: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig + 18, // 30: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster + 3, // 31: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange + 1, // 32: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType + 4, // 33: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias + 12, // 34: topodata.ThrottlerConfig.ThrottledAppsEntry.value:type_name -> topodata.ThrottledAppRule + 26, // 35: topodata.ThrottlerConfig.AppCheckedMetricsEntry.value:type_name -> topodata.ThrottlerConfig.MetricNames + 1, // 36: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType + 10, // 37: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference + 11, // 38: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl + 39, // [39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // 
[39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_topodata_proto_init() } diff --git a/go/vt/proto/topodata/topodata_vtproto.pb.go b/go/vt/proto/topodata/topodata_vtproto.pb.go index 5face1a2d9e..8f8bcd8dfec 100644 --- a/go/vt/proto/topodata/topodata_vtproto.pb.go +++ b/go/vt/proto/topodata/topodata_vtproto.pb.go @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" + querythrottler "vitess.io/vitess/go/vt/proto/querythrottler" vtorcdata "vitess.io/vitess/go/vt/proto/vtorcdata" vttime "vitess.io/vitess/go/vt/proto/vttime" ) @@ -208,6 +209,7 @@ func (m *Keyspace) CloneVT() *Keyspace { r.ThrottlerConfig = m.ThrottlerConfig.CloneVT() r.SidecarDbName = m.SidecarDbName r.VtorcState = m.VtorcState.CloneVT() + r.QueryThrottlerConfig = m.QueryThrottlerConfig.CloneVT() if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -433,6 +435,7 @@ func (m *SrvKeyspace) CloneVT() *SrvKeyspace { } r := new(SrvKeyspace) r.ThrottlerConfig = m.ThrottlerConfig.CloneVT() + r.QueryThrottlerConfig = m.QueryThrottlerConfig.CloneVT() if rhs := m.Partitions; rhs != nil { tmpContainer := make([]*SrvKeyspace_KeyspacePartition, len(rhs)) for k, v := range rhs { @@ -1066,6 +1069,20 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.QueryThrottlerConfig != nil { + size, err := m.QueryThrottlerConfig.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x9 + i-- + dAtA[i] = 0xe2 + i-- + dAtA[i] = 0x82 + } if m.VtorcState != nil { size, err := m.VtorcState.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -1699,6 +1716,20 @@ func (m *SrvKeyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) 
copy(dAtA[i:], m.unknownFields) } + if m.QueryThrottlerConfig != nil { + size, err := m.QueryThrottlerConfig.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x9 + i-- + dAtA[i] = 0xe2 + i-- + dAtA[i] = 0x82 + } if m.ThrottlerConfig != nil { size, err := m.ThrottlerConfig.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -2190,6 +2221,10 @@ func (m *Keyspace) SizeVT() (n int) { l = m.VtorcState.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.QueryThrottlerConfig != nil { + l = m.QueryThrottlerConfig.SizeVT() + n += 3 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -2418,6 +2453,10 @@ func (m *SrvKeyspace) SizeVT() (n int) { l = m.ThrottlerConfig.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.QueryThrottlerConfig != nil { + l = m.QueryThrottlerConfig.SizeVT() + n += 3 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -4236,6 +4275,42 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 20000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryThrottlerConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.QueryThrottlerConfig == nil { + m.QueryThrottlerConfig = &querythrottler.Config{} + } + if err := m.QueryThrottlerConfig.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex 
skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -5762,6 +5837,42 @@ func (m *SrvKeyspace) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 20000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryThrottlerConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.QueryThrottlerConfig == nil { + m.QueryThrottlerConfig = &querythrottler.Config{} + } + if err := m.QueryThrottlerConfig.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/go/vt/schemadiff/schema.go b/go/vt/schemadiff/schema.go index 4bfb408dafb..da9f831fcc6 100644 --- a/go/vt/schemadiff/schema.go +++ b/go/vt/schemadiff/schema.go @@ -134,21 +134,31 @@ func getForeignKeyParentTableNames(createTable *sqlparser.CreateTable) (names [] } // getViewDependentTableNames analyzes a CREATE VIEW definition and extracts all tables/views read by this view -func getViewDependentTableNames(createView *sqlparser.CreateView) (names []string) { +func getViewDependentTableNames(createView *sqlparser.CreateView) (names []string, cteNames []string) { + cteMap := make(map[string]bool) _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { switch node := node.(type) { + case *sqlparser.CommonTableExpr: + if !cteMap[node.ID.String()] { + cteNames = append(cteNames, node.ID.String()) + cteMap[node.ID.String()] = true + } case *sqlparser.TableName: - names = append(names, node.Name.String()) + if _, 
isCte := cteMap[node.Name.String()]; !isCte { + names = append(names, node.Name.String()) + } case *sqlparser.AliasedTableExpr: if tableName, ok := node.Expr.(sqlparser.TableName); ok { - names = append(names, tableName.Name.String()) + if _, isCte := cteMap[tableName.Name.String()]; !isCte { + names = append(names, tableName.Name.String()) + } } // or, this could be a more complex expression, like a derived table `(select * from v1) as derived`, // in which case further Walk-ing will eventually find the "real" table name } return true, nil }, createView) - return names + return names, cteNames } // normalize is called as part of Schema creation process. The user may only get a hold of normalized schema. @@ -310,7 +320,7 @@ func (s *Schema) normalize(hints *DiffHints) error { continue } // Not handled. Is this view dependent on already handled objects? - dependentNames := getViewDependentTableNames(v.CreateView) + dependentNames, _ := getViewDependentTableNames(v.CreateView) if allNamesFoundInLowerLevel(dependentNames, iterationLevel) { s.sorted = append(s.sorted, v) dependencyLevels[v.Name()] = iterationLevel @@ -341,7 +351,7 @@ func (s *Schema) normalize(hints *DiffHints) error { if _, ok := dependencyLevels[v.Name()]; !ok { // We _know_ that in this iteration, at least one view is found unassigned a dependency level. // We gather all the errors. 
- dependentNames := getViewDependentTableNames(v.CreateView) + dependentNames, _ := getViewDependentTableNames(v.CreateView) missingReferencedEntities := []string{} for _, name := range dependentNames { if _, ok := dependencyLevels[name]; !ok { @@ -974,12 +984,16 @@ func (s *Schema) SchemaDiff(other *Schema, hints *DiffHints) (*SchemaDiff, error for _, diff := range schemaDiff.UnorderedDiffs() { switch diff := diff.(type) { case *CreateViewEntityDiff: - checkDependencies(diff, getViewDependentTableNames(diff.createView)) + dependentNames, _ := getViewDependentTableNames(diff.createView) + checkDependencies(diff, dependentNames) case *AlterViewEntityDiff: - checkDependencies(diff, getViewDependentTableNames(diff.from.CreateView)) - checkDependencies(diff, getViewDependentTableNames(diff.to.CreateView)) + fromDependentNames, _ := getViewDependentTableNames(diff.from.CreateView) + checkDependencies(diff, fromDependentNames) + toDependentNames, _ := getViewDependentTableNames(diff.to.CreateView) + checkDependencies(diff, toDependentNames) case *DropViewEntityDiff: - checkDependencies(diff, getViewDependentTableNames(diff.from.CreateView)) + dependentNames, _ := getViewDependentTableNames(diff.from.CreateView) + checkDependencies(diff, dependentNames) case *CreateTableEntityDiff: checkDependencies(diff, getForeignKeyParentTableNames(diff.CreateTable())) _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { @@ -1142,13 +1156,15 @@ func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *decl for _, node := range v.Select.GetColumns() { switch node := node.(type) { case *sqlparser.StarExpr: + dependentNames, cteNames := getViewDependentTableNames(v.CreateView) if tableName := node.TableName.Name.String(); tableName != "" { - for _, col := range schemaInformation.Tables[tableName].Columns { - name := sqlparser.Clone(col.Name) - columnNames = append(columnNames, &name) + if tbl, ok := schemaInformation.Tables[tableName]; ok { + for 
_, col := range tbl.Columns { + name := sqlparser.Clone(col.Name) + columnNames = append(columnNames, &name) + } } } else { - dependentNames := getViewDependentTableNames(v.CreateView) // add all columns from all referenced tables and views for _, entityName := range dependentNames { if schemaInformation.Tables[entityName] != nil { // is nil for dual/DUAL @@ -1159,7 +1175,10 @@ func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *decl } } } - if len(columnNames) == 0 { + if len(columnNames) == 0 && len(cteNames) == 0 { + // *-expressions that do not resolve to any columns are invalid in views. + // For CTEs, schemadiff does not analyze the list of columns returned by the CTE (even if the CTE defines it). + // TODO(shlomi): analyze CTE columns as well. return nil, &InvalidStarExprInViewError{View: v.Name()} } case *sqlparser.AliasedExpr: diff --git a/go/vt/schemadiff/schema_test.go b/go/vt/schemadiff/schema_test.go index 46d3a338338..c8e9b4520b0 100644 --- a/go/vt/schemadiff/schema_test.go +++ b/go/vt/schemadiff/schema_test.go @@ -176,6 +176,52 @@ func TestNewSchemaFromQueriesViewFromDualImplicit(t *testing.T) { assert.NoError(t, err) } +func TestNewSchemaFromQueriesViewWithCTEFail(t *testing.T) { + queries := []string{"create view v30 as with vcte as (select 1) select * from vcte2"} + _, err := NewSchemaFromQueries(NewTestEnv(), queries) + assert.Error(t, err) + assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v30", MissingReferencedEntities: []string{"dual", "vcte2"}}).Error()) +} + +func TestNewSchemaFromQueriesViewWithCTE(t *testing.T) { + tcases := []struct { + name string + queries []string + }{ + { + "no table", + []string{"create view v20 as with vcte as (select 1) select * from vcte"}, + }, + { + "with table", + []string{ + "create table orders (id int primary key, info int not null)", + "create view v21 as with vcte as (select * from orders) select * from vcte", + }, + }, + { + "with table and column aliasing", + 
[]string{ + "create table orders (id int primary key, info int not null)", + "create view v22 as with vcte as (select id, info as val from orders) select * from vcte", + }, + }, + { + "with table and select all from cte", + []string{ + "create table orders (id int primary key, info int not null)", + "create view v22 as with vcte as (select id, info as val from orders) select vcte.* from vcte", + }, + }, + } + for _, tc := range tcases { + t.Run(tc.name, func(t *testing.T) { + _, err := NewSchemaFromQueries(NewTestEnv(), tc.queries) + assert.NoError(t, err) + }) + } +} + func TestNewSchemaFromQueriesLoop(t *testing.T) { // v7 and v8 depend on each other queries := append(schemaTestCreateQueries, @@ -213,6 +259,7 @@ func TestGetViewDependentTableNames(t *testing.T) { name string view string tables []string + ctes []string }{ { view: "create view v6 as select * from v4", @@ -242,6 +289,16 @@ func TestGetViewDependentTableNames(t *testing.T) { view: "create view v9 as select 1", tables: []string{"dual"}, }, + { + view: "create view v20 as with vcte as (select 1) select * from vcte", + tables: []string{"dual"}, + ctes: []string{"vcte"}, + }, + { + view: "create view v21 as with vcte as (select * from orders) select * from vcte", + tables: []string{"orders"}, + ctes: []string{"vcte"}, + }, } for _, ts := range tt { t.Run(ts.view, func(t *testing.T) { @@ -250,8 +307,9 @@ func TestGetViewDependentTableNames(t *testing.T) { createView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) - tables := getViewDependentTableNames(createView) + tables, ctes := getViewDependentTableNames(createView) assert.Equal(t, ts.tables, tables) + assert.Equal(t, ts.ctes, ctes) }) } } diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index 6ed5762f96e..29c3043dd60 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -2657,11 +2657,14 @@ func (api *API) VExplain(ctx context.Context, req *vtadminpb.VExplainRequest) (* return nil, err } - if _, ok := 
stmt.(*sqlparser.VExplainStmt); !ok { + vexplainStmt, ok := stmt.(*sqlparser.VExplainStmt) + if !ok { return nil, vterrors.VT09017("Invalid VExplain statement") } - response, err := c.DB.VExplain(ctx, req.GetSql(), stmt.(*sqlparser.VExplainStmt)) + // Canonicalize the SQL using the AST, to prevent use of raw user input. + canonicalQuery := sqlparser.String(vexplainStmt) + response, err := c.DB.VExplain(ctx, canonicalQuery, vexplainStmt) if err != nil { return nil, err diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 446cc836402..79b9cfb4e7f 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -1017,7 +1017,7 @@ func (itmc *internalTabletManagerClient) ReadReparentJournalInfo(ctx context.Con return 0, errors.New("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) DemotePrimary(context.Context, *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) { +func (itmc *internalTabletManagerClient) DemotePrimary(context.Context, *topodatapb.Tablet, bool) (*replicationdatapb.PrimaryStatus, error) { return nil, errors.New("not implemented in vtcombo") } diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index 00b1df26eba..055cc466382 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -554,7 +554,7 @@ func (fake *TabletManagerClient) ChangeType(ctx context.Context, tablet *topodat } // DemotePrimary is part of the tmclient.TabletManagerClient interface. 
-func (fake *TabletManagerClient) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) { +func (fake *TabletManagerClient) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet, force bool) (*replicationdatapb.PrimaryStatus, error) { if fake.DemotePrimaryResults == nil { return nil, assert.AnError } diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index 5317a56ee18..b0133499347 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -19,7 +19,6 @@ package reparentutil import ( "context" "errors" - "fmt" "slices" "testing" "time" @@ -4673,7 +4672,7 @@ func getRelayLogPosition(gtidSets ...string) string { res += "," } first = false - res += fmt.Sprintf("%s:%s", uuids[idx], set) + res += uuids[idx] + ":" + set } return res } diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index dcd6dc7c590..52b51b52b8f 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -275,7 +275,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( demoteCtx, demoteCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer demoteCancel() - primaryStatus, err := pr.tmc.DemotePrimary(demoteCtx, currentPrimary.Tablet) + primaryStatus, err := pr.tmc.DemotePrimary(demoteCtx, currentPrimary.Tablet, false) if err != nil { return vterrors.Wrapf(err, "failed to DemotePrimary on current primary %v: %v", currentPrimary.AliasString(), err) } @@ -426,7 +426,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( // tablet type), that's already in read-only. 
pr.logger.Infof("demoting tablet %v", alias) - primaryStatus, err := pr.tmc.DemotePrimary(stopAllCtx, tablet) + primaryStatus, err := pr.tmc.DemotePrimary(stopAllCtx, tablet, false) if err != nil { rec.RecordError(vterrors.Wrapf(err, "DemotePrimary(%v) failed on contested primary", alias)) diff --git a/go/vt/vtctl/reparentutil/policy/durability.go b/go/vt/vtctl/reparentutil/policy/durability.go index bad6846ef29..1ff88267fbf 100644 --- a/go/vt/vtctl/reparentutil/policy/durability.go +++ b/go/vt/vtctl/reparentutil/policy/durability.go @@ -91,6 +91,8 @@ type Durabler interface { SemiSyncAckers(*topodatapb.Tablet) int // IsReplicaSemiSync returns whether the "replica" should send semi-sync acks if "primary" were to become the PRIMARY instance IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool + // HasSemiSync returns whether the durability policy uses semi-sync. + HasSemiSync() bool } func RegisterDurability(name string, newDurablerFunc NewDurabler) { @@ -142,6 +144,11 @@ func IsReplicaSemiSync(durability Durabler, primary, replica *topodatapb.Tablet) return durability.IsReplicaSemiSync(primary, replica) } +// HasSemiSync returns true if the durability policy uses semi-sync. +func HasSemiSync(durability Durabler) bool { + return durability.HasSemiSync() +} + //======================================================================= // durabilityNone has no semi-sync and returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else @@ -166,6 +173,11 @@ func (d *durabilityNone) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) return false } +// HasSemiSync implements the Durabler interface +func (d *durabilityNone) HasSemiSync() bool { + return false +} + //======================================================================= // durabilitySemiSync has 1 semi-sync setup. 
It only allows Primary and Replica type servers to acknowledge semi sync @@ -199,6 +211,11 @@ func (d *durabilitySemiSync) IsReplicaSemiSync(primary, replica *topodatapb.Tabl return false } +// HasSemiSync implements the Durabler interface +func (d *durabilitySemiSync) HasSemiSync() bool { + return true +} + //======================================================================= // durabilityCrossCell has 1 semi-sync setup. It only allows Primary and Replica type servers from a different cell to acknowledge semi sync. @@ -233,6 +250,11 @@ func (d *durabilityCrossCell) IsReplicaSemiSync(primary, replica *topodatapb.Tab return false } +// HasSemiSync implements the Durabler interface +func (d *durabilityCrossCell) HasSemiSync() bool { + return true +} + //======================================================================= // durabilityTest is like durabilityNone. It overrides the type for a specific tablet to prefer. It is only meant to be used for testing purposes! @@ -260,3 +282,8 @@ func (d *durabilityTest) SemiSyncAckers(tablet *topodatapb.Tablet) int { func (d *durabilityTest) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { return false } + +// HasSemiSync implements the Durabler interface +func (d *durabilityTest) HasSemiSync() bool { + return false +} diff --git a/go/vt/vtctl/reparentutil/policy/durability_test.go b/go/vt/vtctl/reparentutil/policy/durability_test.go index 441275f29bf..e3a4710b2d1 100644 --- a/go/vt/vtctl/reparentutil/policy/durability_test.go +++ b/go/vt/vtctl/reparentutil/policy/durability_test.go @@ -331,3 +331,10 @@ func TestDurabilityTest(t *testing.T) { }) } } + +func TestHasSemiSync(t *testing.T) { + require.False(t, HasSemiSync(&durabilityNone{})) + require.False(t, HasSemiSync(&durabilityTest{})) + require.True(t, HasSemiSync(&durabilitySemiSync{})) + require.True(t, HasSemiSync(&durabilityCrossCell{})) +} diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index 
096cb7166ee..e13a3e0dd44 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -255,7 +255,7 @@ func stopReplicationAndBuildStatusMaps( if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { var primaryStatus *replicationdatapb.PrimaryStatus - primaryStatus, err = tmc.DemotePrimary(groupCtx, tabletInfo.Tablet) + primaryStatus, err = tmc.DemotePrimary(groupCtx, tabletInfo.Tablet, true /* force */) if err != nil { msg := "replica %v thinks it's primary but we failed to demote it: %v" err = vterrors.Wrapf(err, msg, alias, err) diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index be0b47e7246..d0fedde0f8a 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -224,7 +224,7 @@ type stopReplicationAndBuildStatusMapsTestTMClient struct { stopReplicationAndGetStatusDelays map[string]time.Duration } -func (fake *stopReplicationAndBuildStatusMapsTestTMClient) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) { +func (fake *stopReplicationAndBuildStatusMapsTestTMClient) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet, force bool) (*replicationdatapb.PrimaryStatus, error) { if tablet.Alias == nil { return nil, assert.AnError } diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index 5b8a40e23f1..883465b9903 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -1843,6 +1843,8 @@ func TestInsertGeneratorSharded(t *testing.T) { InsertIDChanged: true, } utils.MustMatch(t, wantResult, result) + // Verify that LastInsertId is set in the session (regression test for #18946) + assert.EqualValues(t, 1, session.LastInsertId) } func TestInsertAutoincSharded(t *testing.T) { @@ -1899,6 +1901,8 @@ func TestInsertGeneratorUnsharded(t *testing.T) { RowsAffected: 1, } 
utils.MustMatch(t, wantResult, result) + // Verify that LastInsertId is set in the session (regression test for #18946) + assert.EqualValues(t, 1, session.LastInsertId) } func TestInsertAutoincUnsharded(t *testing.T) { diff --git a/go/vt/vtgate/executorcontext/vcursor_impl.go b/go/vt/vtgate/executorcontext/vcursor_impl.go index fe4bee5f868..fc98e1f37c0 100644 --- a/go/vt/vtgate/executorcontext/vcursor_impl.go +++ b/go/vt/vtgate/executorcontext/vcursor_impl.go @@ -715,6 +715,9 @@ func (vc *VCursorImpl) ExecutePrimitive(ctx context.Context, primitive engine.Pr continue } vc.logOpTraffic(primitive, res) + if res != nil && res.InsertIDUpdated() { + vc.SafeSession.LastInsertId = res.InsertID + } return res, err } return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "upstream shards are not available") diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index dd135154f9f..a50b45235fd 100644 --- a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -109,6 +109,8 @@ func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLSt if err != nil { return nil, nil, err } + // Remove keyspace qualifiers from all table references (including foreign key references). 
+ sqlparser.RemoveSpecificKeyspace(ddlStatement, keyspace.Name) err = checkFKError(vschema, ddlStatement, keyspace) case *sqlparser.CreateView: destination, keyspace, err = buildCreateViewCommon(ctx, vschema, reservedVars, cfg, ddl.Select, ddl) diff --git a/go/vt/vtgate/planbuilder/operators/rewriters.go b/go/vt/vtgate/planbuilder/operators/rewriters.go index 6d06a10dee2..5f35a52950d 100644 --- a/go/vt/vtgate/planbuilder/operators/rewriters.go +++ b/go/vt/vtgate/planbuilder/operators/rewriters.go @@ -342,6 +342,13 @@ func topDown( return newOp, anythingChanged } + // If the rewriter replaced the operator with a different one, we need to re-visit + // the new operator to give it a chance to be processed + if anythingChanged.Changed() && newOp != root { + revisitedOp, revisitChanged := topDown(newOp, rootID, resolveID, rewriter, shouldVisit, isRoot) + return revisitedOp, anythingChanged.Merge(revisitChanged) + } + if anythingChanged.Changed() { root = newOp } diff --git a/go/vt/vtgate/planbuilder/operators/rewriters_test.go b/go/vt/vtgate/planbuilder/operators/rewriters_test.go new file mode 100644 index 00000000000..1f92faa9f04 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/rewriters_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2025 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +// TestTopDown_RevisitsReplacedOperators verifies that when an operator is replaced +// during TopDown traversal, the new operator gets re-visited. +// +// This test demonstrates the bug fix where TopDown now re-visits operators that are returned +// as replacements. Without the fix, if a visitor returns a different operator, that new +// operator would be inserted into the tree but never visited itself. +// +// Test scenario: +// 1. Create a simple operator +// 2. First visit returns a different operator (simulating what happens in offset planning) +// 3. Verify that the replacement operator is also visited +// +// Without the fix, the second operator would never be visited because TopDown would just +// replace the first with the second and descend into the second's inputs without visiting +// the second itself. +func TestTopDown_RevisitsReplacedOperators(t *testing.T) { + visited := map[semantics.TableSet]bool{} + + // Create two simple operators to track visits + id0 := semantics.SingleTableSet(0) + id1 := semantics.SingleTableSet(1) + op1 := &fakeOp{id: id0} + op2 := &fakeOp{id: id1} + + // Create visitor that replaces op1 with op2 and marks each operator as visited + + visitor := func(in Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { + visited[TableID(in)] = true + if in == op1 { + return op2, Rewrote("replaced operator") + } + return in, NoRewrite + } + + // Run TopDown traversal + result := TopDown(op1, func(op Operator) semantics.TableSet { + return semantics.EmptyTableSet() + }, visitor, func(Operator) VisitRule { + return VisitChildren + }) + + // Verify both operators were visited + assert.True(t, visited[id0], "first operator should have been visited") + assert.True(t, visited[id1], "second operator (replacement) should have been visited") + assert.Equal(t, op2, result, "result should be the 
second operator") +} diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go index 2f5ab158fa6..f91f4b55586 100644 --- a/go/vt/vtgate/planbuilder/operators/route_planning.go +++ b/go/vt/vtgate/planbuilder/operators/route_planning.go @@ -250,6 +250,10 @@ func findBestJoin( continue } plan := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates) + if _, ok := plan.(*Route); ok { + // we were able to merge the two inputs - we're done for now + return plan, i, j + } if bestPlan == nil || CostOf(plan) < CostOf(bestPlan) { bestPlan = plan // remember which plans we based on, so we can remove them later diff --git a/go/vt/vtgate/planbuilder/operators/utils_test.go b/go/vt/vtgate/planbuilder/operators/utils_test.go index babf6c91afb..31ad15840f7 100644 --- a/go/vt/vtgate/planbuilder/operators/utils_test.go +++ b/go/vt/vtgate/planbuilder/operators/utils_test.go @@ -41,8 +41,7 @@ func (f *fakeOp) Inputs() []Operator { } func (f *fakeOp) SetInputs(operators []Operator) { - // TODO implement me - panic("implement me") + f.inputs = operators } func (f *fakeOp) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json index 3b84004ca99..b51de04ff47 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json @@ -683,5 +683,105 @@ "main.function_default" ] } + }, + { + "comment": "create table with foreign key reference without keyspace qualifier", + "query": "create table t1(id bigint, t2_id bigint, primary key(id), foreign key (t2_id) references t2(id))", + "plan": { + "Type": "DirectDDL", + "QueryType": "DDL", + "Original": "create table t1(id bigint, t2_id bigint, primary key(id), foreign key (t2_id) references t2(id))", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + 
"Query": "create table t1 (\n\tid bigint,\n\tt2_id bigint,\n\tprimary key (id),\n\tforeign key (t2_id) references t2 (id)\n)" + }, + "TablesUsed": [ + "main.t1" + ] + } + }, + { + "comment": "create table with foreign key reference with keyspace qualifier", + "query": "create table user.t1(id bigint, t2_id bigint, primary key(id), foreign key (t2_id) references user.t2(id))", + "plan": { + "Type": "DirectDDL", + "QueryType": "DDL", + "Original": "create table user.t1(id bigint, t2_id bigint, primary key(id), foreign key (t2_id) references user.t2(id))", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "create table t1 (\n\tid bigint,\n\tt2_id bigint,\n\tprimary key (id),\n\tforeign key (t2_id) references t2 (id)\n)" + }, + "TablesUsed": [ + "user.t1" + ] + } + }, + { + "comment": "create table with multiple foreign keys with keyspace qualifiers", + "query": "create table user.orders(order_id bigint, customer_id bigint, product_id bigint, primary key(order_id), foreign key (customer_id) references user.customers(id), foreign key (product_id) references user.products(id))", + "plan": { + "Type": "DirectDDL", + "QueryType": "DDL", + "Original": "create table user.orders(order_id bigint, customer_id bigint, product_id bigint, primary key(order_id), foreign key (customer_id) references user.customers(id), foreign key (product_id) references user.products(id))", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "create table orders (\n\torder_id bigint,\n\tcustomer_id bigint,\n\tproduct_id bigint,\n\tprimary key (order_id),\n\tforeign key (customer_id) references customers (id),\n\tforeign key (product_id) references products (id)\n)" + }, + "TablesUsed": [ + "user.orders" + ] + } + }, + { + "comment": "alter table add foreign key with keyspace qualifier", + "query": "alter table user.t1 add foreign key (t2_id) references user.t2(id)", + "plan": 
{ + "Type": "DirectDDL", + "QueryType": "DDL", + "Original": "alter table user.t1 add foreign key (t2_id) references user.t2(id)", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "alter table t1 add foreign key (t2_id) references t2 (id)" + }, + "TablesUsed": [ + "user.t1" + ] + } + }, + { + "comment": "create table with foreign key with ON DELETE and ON UPDATE clauses and keyspace qualifier", + "query": "create table user.employees(emp_id bigint, dept_id bigint, primary key(emp_id), foreign key (dept_id) references user.departments(dept_id) on delete set null on update cascade)", + "plan": { + "Type": "DirectDDL", + "QueryType": "DDL", + "Original": "create table user.employees(emp_id bigint, dept_id bigint, primary key(emp_id), foreign key (dept_id) references user.departments(dept_id) on delete set null on update cascade)", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "create table employees (\n\temp_id bigint,\n\tdept_id bigint,\n\tprimary key (emp_id),\n\tforeign key (dept_id) references departments (dept_id) on delete set null on update cascade\n)" + }, + "TablesUsed": [ + "user.employees" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json index cb36a8de23c..5388512f127 100644 --- a/go/vt/vtgate/planbuilder/testdata/reference_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json @@ -753,7 +753,7 @@ "Sharded": true }, "FieldQuery": "select 1 from `user` as u, user_extra as ue, ref_with_source as sr, ref as rr where 1 != 1", - "Query": "select 1 from `user` as u, user_extra as ue, ref_with_source as sr, ref as rr where rr.bar = sr.bar and u.id = ue.user_id and sr.foo = ue.foo" + "Query": "select 1 from `user` as u, user_extra as ue, ref_with_source as sr, ref as rr where u.id = ue.user_id and rr.bar = sr.bar and sr.foo = ue.foo" 
}, "TablesUsed": [ "user.ref", diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index 32430b9126a..be83726f0e4 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -68,16 +68,23 @@ var ( logCollations = logutil.NewThrottledLogger("CollationInconsistent", 1*time.Minute) ) +func registerTabletGatewayFlags(fs *pflag.FlagSet) { + utils.SetFlagStringVar(fs, &CellsToWatch, "cells-to-watch", "", "comma-separated list of cells for watching tablets") + utils.SetFlagDurationVar(fs, &initialTabletTimeout, "gateway-initial-tablet-timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type") + fs.IntVar(&retryCount, "retry-count", 2, "retry count") + fs.BoolVar(&balancerEnabled, "enable-balancer", false, "(DEPRECATED: use --vtgate-balancer-mode instead) Enable the tablet balancer to evenly spread query load for a given tablet type") + fs.StringVar(&balancerModeFlag, "vtgate-balancer-mode", "", fmt.Sprintf("Tablet balancer mode (options: %s). Defaults to 'cell' which shuffles tablets in the local cell.", strings.Join(balancer.GetAvailableModeNames(), ", "))) + fs.StringSliceVar(&balancerVtgateCells, "balancer-vtgate-cells", []string{}, "Comma-separated list of cells that contain vttablets. For 'prefer-cell' mode, this is required. For 'random' mode, this is optional and filters tablets to those cells.") + fs.StringSliceVar(&balancerKeyspaces, "balancer-keyspaces", []string{}, "Comma-separated list of keyspaces for which to use the balancer (optional). 
If empty, applies to all keyspaces.") +} + +func registerVtcomboTabletGatewayFlags(fs *pflag.FlagSet) { + utils.SetFlagDurationVar(fs, &initialTabletTimeout, "gateway-initial-tablet-timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type") +} + func init() { - servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { - utils.SetFlagStringVar(fs, &CellsToWatch, "cells-to-watch", "", "comma-separated list of cells for watching tablets") - utils.SetFlagDurationVar(fs, &initialTabletTimeout, "gateway-initial-tablet-timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type") - fs.IntVar(&retryCount, "retry-count", 2, "retry count") - fs.BoolVar(&balancerEnabled, "enable-balancer", false, "(DEPRECATED: use --vtgate-balancer-mode instead) Enable the tablet balancer to evenly spread query load for a given tablet type") - fs.StringVar(&balancerModeFlag, "vtgate-balancer-mode", "", fmt.Sprintf("Tablet balancer mode (options: %s). Defaults to 'cell' which shuffles tablets in the local cell.", strings.Join(balancer.GetAvailableModeNames(), ", "))) - fs.StringSliceVar(&balancerVtgateCells, "balancer-vtgate-cells", []string{}, "Comma-separated list of cells that contain vttablets. For 'prefer-cell' mode, this is required. For 'random' mode, this is optional and filters tablets to those cells.") - fs.StringSliceVar(&balancerKeyspaces, "balancer-keyspaces", []string{}, "Comma-separated list of keyspaces for which to use the balancer (optional). If empty, applies to all keyspaces.") - }) + servenv.OnParseFor("vtgate", registerTabletGatewayFlags) + servenv.OnParseFor("vtcombo", registerVtcomboTabletGatewayFlags) } // TabletGateway implements the Gateway interface. 
diff --git a/go/vt/vttablet/faketmclient/fake_client.go b/go/vt/vttablet/faketmclient/fake_client.go index d1c9da62210..2378c4acb54 100644 --- a/go/vt/vttablet/faketmclient/fake_client.go +++ b/go/vt/vttablet/faketmclient/fake_client.go @@ -378,7 +378,7 @@ func (client *FakeTabletManagerClient) ReadReparentJournalInfo(ctx context.Conte } // DemotePrimary is part of the tmclient.TabletManagerClient interface. -func (client *FakeTabletManagerClient) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) { +func (client *FakeTabletManagerClient) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet, force bool) (*replicationdatapb.PrimaryStatus, error) { return nil, nil } diff --git a/go/vt/vttablet/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go index c094d7f17e0..8d76cf45790 100644 --- a/go/vt/vttablet/grpctmclient/client.go +++ b/go/vt/vttablet/grpctmclient/client.go @@ -103,17 +103,25 @@ type tmc struct { client tabletmanagerservicepb.TabletManagerClient } -type addrTmcMap map[string]*tmc +type tmcEntry struct { + once sync.Once + tmc *tmc + err error +} + +type addrTmcMap map[string]*tmcEntry // grpcClient implements both dialer and poolDialer. type grpcClient struct { // This cache of connections is to maximize QPS for ExecuteFetchAs{Dba,App}, // CheckThrottler and FullStatus. Note we'll keep the clients open and close them upon Close() only. // But that's OK because usually the tasks that use them are one-purpose only. - // The map is protected by the mutex. - mu sync.Mutex + // rpcClientMapMu protects rpcClientMap. + rpcClientMapMu sync.Mutex rpcClientMap map[string]chan *tmc - rpcDialPoolMap map[DialPoolGroup]addrTmcMap + // rpcDialPoolMapMu protects rpcDialPoolMap. 
+ rpcDialPoolMapMu sync.Mutex + rpcDialPoolMap map[DialPoolGroup]addrTmcMap } type dialer interface { @@ -185,16 +193,25 @@ func (client *grpcClient) dialPool(ctx context.Context, tablet *topodatapb.Table return nil, vterrors.FromGRPC(err) } - client.mu.Lock() - if client.rpcClientMap == nil { - client.rpcClientMap = make(map[string]chan *tmc) - } - c, ok := client.rpcClientMap[addr] - if !ok { + c, isEmpty := func() (chan *tmc, bool) { + client.rpcClientMapMu.Lock() + defer client.rpcClientMapMu.Unlock() + + if client.rpcClientMap == nil { + client.rpcClientMap = make(map[string]chan *tmc) + } + c, ok := client.rpcClientMap[addr] + if ok { + return c, false + } + c = make(chan *tmc, concurrency) client.rpcClientMap[addr] = c - client.mu.Unlock() + return c, true + }() + // If the channel is empty, populate it with connections. + if isEmpty { for i := 0; i < cap(c); i++ { tm, err := client.createTmc(ctx, addr, opt) if err != nil { @@ -202,8 +219,6 @@ func (client *grpcClient) dialPool(ctx context.Context, tablet *topodatapb.Table } c <- tm } - } else { - client.mu.Unlock() } result := <-c @@ -218,44 +233,81 @@ func (client *grpcClient) dialDedicatedPool(ctx context.Context, dialPoolGroup D return nil, nil, err } - client.mu.Lock() - defer client.mu.Unlock() - if client.rpcDialPoolMap == nil { - client.rpcDialPoolMap = make(map[DialPoolGroup]addrTmcMap) - } - if _, ok := client.rpcDialPoolMap[dialPoolGroup]; !ok { - client.rpcDialPoolMap[dialPoolGroup] = make(addrTmcMap) - } - m := client.rpcDialPoolMap[dialPoolGroup] - if _, ok := m[addr]; !ok { - tm, err := client.createTmc(ctx, addr, opt) - if err != nil { - return nil, nil, err + entry := func() *tmcEntry { + client.rpcDialPoolMapMu.Lock() + defer client.rpcDialPoolMapMu.Unlock() + + if client.rpcDialPoolMap == nil { + client.rpcDialPoolMap = make(map[DialPoolGroup]addrTmcMap) + } + if _, ok := client.rpcDialPoolMap[dialPoolGroup]; !ok { + client.rpcDialPoolMap[dialPoolGroup] = make(addrTmcMap) + } + + 
poolEntries := client.rpcDialPoolMap[dialPoolGroup] + entry, ok := poolEntries[addr] + if ok { + return entry } - m[addr] = tm + + entry = &tmcEntry{} + poolEntries[addr] = entry + return entry + }() + + // Initialize connection exactly once, without holding the mutex + entry.once.Do(func() { + entry.tmc, entry.err = client.createTmc(ctx, addr, opt) + }) + + if entry.err != nil { + return nil, nil, entry.err } + invalidator := func() { - client.mu.Lock() - defer client.mu.Unlock() - if tm := m[addr]; tm != nil && tm.cc != nil { - tm.cc.Close() + client.rpcDialPoolMapMu.Lock() + defer client.rpcDialPoolMapMu.Unlock() + + if entry.tmc != nil && entry.tmc.cc != nil { + entry.tmc.cc.Close() + } + + if poolEntries, ok := client.rpcDialPoolMap[dialPoolGroup]; ok { + delete(poolEntries, addr) } - delete(m, addr) } - return m[addr].client, invalidator, nil + return entry.tmc.client, invalidator, nil } // Close is part of the tmclient.TabletManagerClient interface. func (client *grpcClient) Close() { - client.mu.Lock() - defer client.mu.Unlock() - for _, c := range client.rpcClientMap { - close(c) - for ch := range c { - ch.cc.Close() + func() { + client.rpcClientMapMu.Lock() + defer client.rpcClientMapMu.Unlock() + + for _, c := range client.rpcClientMap { + close(c) + for ch := range c { + ch.cc.Close() + } } - } - client.rpcClientMap = nil + client.rpcClientMap = nil + }() + + // Close dedicated pools + func() { + client.rpcDialPoolMapMu.Lock() + defer client.rpcDialPoolMapMu.Unlock() + + for _, addrMap := range client.rpcDialPoolMap { + for _, tm := range addrMap { + if tm != nil && tm.tmc != nil && tm.tmc.cc != nil { + tm.tmc.cc.Close() + } + } + } + client.rpcDialPoolMap = nil + }() } // @@ -1191,13 +1243,13 @@ func (client *Client) InitReplica(ctx context.Context, tablet *topodatapb.Tablet } // DemotePrimary is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) { +func (client *Client) DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet, force bool) (*replicationdatapb.PrimaryStatus, error) { c, closer, err := client.dialer.dial(ctx, tablet) if err != nil { return nil, err } defer closer.Close() - response, err := c.DemotePrimary(ctx, &tabletmanagerdatapb.DemotePrimaryRequest{}) + response, err := c.DemotePrimary(ctx, &tabletmanagerdatapb.DemotePrimaryRequest{Force: force}) if err != nil { return nil, vterrors.FromGRPC(err) } diff --git a/go/vt/vttablet/grpctmclient/client_test.go b/go/vt/vttablet/grpctmclient/client_test.go index 45869a84b0c..109744eddac 100644 --- a/go/vt/vttablet/grpctmclient/client_test.go +++ b/go/vt/vttablet/grpctmclient/client_test.go @@ -65,11 +65,12 @@ func TestDialDedicatedPool(t *testing.T) { assert.NotEmpty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) - c := rpcClient.rpcDialPoolMap[dialPoolGroupThrottler][addr] - assert.NotNil(t, c) - assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, c.cc.GetState()) + entry := rpcClient.rpcDialPoolMap[dialPoolGroupThrottler][addr] + assert.NotNil(t, entry) + assert.NotNil(t, entry.tmc) + assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, entry.tmc.cc.GetState()) - cachedTmc = c + cachedTmc = entry.tmc }) t.Run("CheckThrottler", func(t *testing.T) { @@ -145,22 +146,31 @@ func TestDialPool(t *testing.T) { _, err := client.CheckThrottler(ctx, tablet, req) assert.Error(t, err) }) + t.Run("post throttler maps", func(t *testing.T) { rpcClient, ok := client.dialer.(*grpcClient) require.True(t, ok) - rpcClient.mu.Lock() - defer rpcClient.mu.Unlock() + func() { + rpcClient.rpcDialPoolMapMu.Lock() + defer rpcClient.rpcDialPoolMapMu.Unlock() - assert.NotEmpty(t, 
rpcClient.rpcDialPoolMap) - assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) - assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + assert.NotEmpty(t, rpcClient.rpcDialPoolMap) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + }() - assert.NotEmpty(t, rpcClient.rpcClientMap) - assert.NotEmpty(t, rpcClient.rpcClientMap[addr]) + func() { + rpcClient.rpcClientMapMu.Lock() + defer rpcClient.rpcClientMapMu.Unlock() + + assert.NotEmpty(t, rpcClient.rpcClientMap) + assert.NotEmpty(t, rpcClient.rpcClientMap[addr]) + }() assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, cachedTmc.cc.GetState()) }) + t.Run("ExecuteFetchAsDba", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() @@ -174,16 +184,22 @@ func TestDialPool(t *testing.T) { rpcClient, ok := client.dialer.(*grpcClient) require.True(t, ok) - rpcClient.mu.Lock() - defer rpcClient.mu.Unlock() - - assert.NotEmpty(t, rpcClient.rpcDialPoolMap) - assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) - assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) - - // The default pools are unaffected. Invalidator does not run, connections are not closed. - assert.NotEmpty(t, rpcClient.rpcClientMap) - assert.NotEmpty(t, rpcClient.rpcClientMap[addr]) + func() { + rpcClient.rpcDialPoolMapMu.Lock() + defer rpcClient.rpcDialPoolMapMu.Unlock() + + assert.NotEmpty(t, rpcClient.rpcDialPoolMap) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + }() + + func() { + rpcClient.rpcClientMapMu.Lock() + defer rpcClient.rpcClientMapMu.Unlock() + // The default pools are unaffected. Invalidator does not run, connections are not closed. 
+ assert.NotEmpty(t, rpcClient.rpcClientMap) + assert.NotEmpty(t, rpcClient.rpcClientMap[addr]) + }() assert.NotNil(t, cachedTmc) assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, cachedTmc.cc.GetState()) diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go index 23a6f88794f..3f81062dc60 100644 --- a/go/vt/vttablet/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -607,7 +607,7 @@ func (s *server) DemotePrimary(ctx context.Context, request *tabletmanagerdatapb defer s.tm.HandleRPCPanic(ctx, "DemotePrimary", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) response = &tabletmanagerdatapb.DemotePrimaryResponse{} - status, err := s.tm.DemotePrimary(ctx) + status, err := s.tm.DemotePrimary(ctx, request.Force) if err == nil { response.PrimaryStatus = status } diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go index 5a5b2cb99c3..9e341662ae4 100644 --- a/go/vt/vttablet/tabletmanager/rpc_agent.go +++ b/go/vt/vttablet/tabletmanager/rpc_agent.go @@ -148,7 +148,7 @@ type RPCTM interface { InitReplica(ctx context.Context, parent *topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64, semiSync bool) error - DemotePrimary(ctx context.Context) (*replicationdatapb.PrimaryStatus, error) + DemotePrimary(ctx context.Context, force bool) (*replicationdatapb.PrimaryStatus, error) UndoDemotePrimary(ctx context.Context, semiSync bool) error diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index b0a4ff2ccd6..a1f3f7c2b11 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -548,20 +548,20 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab // or on a tablet that already transitioned to REPLICA. 
// // If a step fails in the middle, it will try to undo any changes it made. -func (tm *TabletManager) DemotePrimary(ctx context.Context) (*replicationdatapb.PrimaryStatus, error) { +func (tm *TabletManager) DemotePrimary(ctx context.Context, force bool) (*replicationdatapb.PrimaryStatus, error) { log.Infof("DemotePrimary") if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { return nil, err } // The public version always reverts on partial failure. - return tm.demotePrimary(ctx, true /* revertPartialFailure */) + return tm.demotePrimary(ctx, true /* revertPartialFailure */, force) } // demotePrimary implements DemotePrimary with an additional, private option. // // If revertPartialFailure is true, and a step fails in the middle, it will try // to undo any changes it made. -func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure bool) (primaryStatus *replicationdatapb.PrimaryStatus, finalErr error) { +func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure bool, force bool) (primaryStatus *replicationdatapb.PrimaryStatus, finalErr error) { if err := tm.lock(ctx); err != nil { return nil, err } @@ -623,13 +623,66 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure }() } - // Now we know no writes are in-flight and no new writes can occur. - // We just need to wait for no write being blocked on semi-sync ACKs. - err = tm.SemiSyncMonitor.WaitUntilSemiSyncUnblocked(ctx) + isSemiSyncBlocked, err := tm.MysqlDaemon.IsSemiSyncBlocked(ctx) if err != nil { return nil, err } + // `force` is true when `DemotePrimary` is called for `EmergencyReparentShard` or when a primary notices + // that a different tablet has been promoted to primary and demotes itself. + // + // In both cases, the reason for semi sync being blocked is very likely that there's no replica + // connected that can send semi-sync ACKs, so we need to disable semi-sync to enable read-only mode. 
+ // And in either of these cases, it's almost guaranteed that no semi-sync enabled replica will connect + // to this tablet again. + // + // The only way for us to finish the demotion in this scenario is to disable semi-sync - otherwise + // enabling `super_read_only` will end up waiting indefinitely for in-flight transactions + // to complete, which won't happen as they are waiting for semi-sync ACKs. + // + // By disabling semi-sync, we allow the blocking in-flight transactions to complete. Note that at this point, + // the query service is already disabled, so the original sessions that issued those writes + // will never have seen their transactions commit - they will already have received an error. + // + // The demoted primary will end up with errant GTIDs, but that's unavoidable in this scenario. + if force && isSemiSyncBlocked { + if tm.isPrimarySideSemiSyncEnabled(ctx) { + // Disable the primary side semi-sync to unblock the writes. + if err := tm.fixSemiSync(ctx, topodatapb.TabletType_REPLICA, SemiSyncActionSet); err != nil { + return nil, err + } + defer func() { + if finalErr != nil && revertPartialFailure && wasPrimary { + // enable primary-side semi-sync again + if err := tm.fixSemiSync(ctx, topodatapb.TabletType_PRIMARY, SemiSyncActionSet); err != nil { + log.Warningf("fixSemiSync(PRIMARY) failed during revert: %v", err) + } + } + }() + } + } else { + // If `force` is false, we're demoting this primary as part of a `PlannedReparentShard` operation, + // but we might be blocked on semi-sync ACKs. + // + // If there are any in-flight transactions waiting for semi-sync ACKs, + // we won't be able to change the MySQL `super_read_only` because turning on + // read only mode requires all in-flight transactions to complete. + // + // So we're doing a last-ditch effort here trying to wait for in-flight transactions to complete. 
+ // This will only be successful if at least one semi-sync enabled replica connects back to this primary + // and a new transaction commit unblocks the semi-sync wait. + // + // The scenario where this could happen is some sort of network hiccup during a + // `PlannedReparentShard` call, where the primary temporarily loses connectivity to + // all semi-sync enabled replicas. + // + // If we can't unblock within the context timeout, the `PlannedReparentShard` operation will fail. + err = tm.SemiSyncMonitor.WaitUntilSemiSyncUnblocked(ctx) + if err != nil { + return nil, err + } + } + // We can now set MySQL to super_read_only mode. If we are already super_read_only because of a // previous demotion, or because we are not primary anyway, this should be // idempotent. @@ -651,8 +704,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure } }() - // Here, we check if the primary side semi sync is enabled or not. If it isn't enabled then we do not need to take any action. - // If it is enabled then we should turn it off and revert in case of failure. + // If we haven't disabled the primary side semi-sync so far, do it now. if tm.isPrimarySideSemiSyncEnabled(ctx) { // If using semi-sync, we need to disable primary-side. 
if err := tm.fixSemiSync(ctx, topodatapb.TabletType_REPLICA, SemiSyncActionSet); err != nil { diff --git a/go/vt/vttablet/tabletmanager/rpc_replication_test.go b/go/vt/vttablet/tabletmanager/rpc_replication_test.go index cec0027fb55..31da3abb732 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication_test.go @@ -28,11 +28,12 @@ import ( "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/mysqlctl" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/tabletmanager/semisyncmonitor" "vitess.io/vitess/go/vt/vttablet/tabletserver" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // TestWaitForGrantsToHaveApplied tests that waitForGrantsToHaveApplied only succeeds after waitForDBAGrants has been called. @@ -97,7 +98,7 @@ func TestDemotePrimaryStalled(t *testing.T) { } go func() { - tm.demotePrimary(context.Background(), false) + tm.demotePrimary(context.Background(), false /* revertPartialFailure */, false /* force */) }() // We make IsServing stall by making it wait on a channel. // This should cause the demote primary operation to be stalled. @@ -130,12 +131,17 @@ func TestDemotePrimaryWaitingForSemiSyncUnblock(t *testing.T) { tm.SemiSyncMonitor.Open() // Add a universal insert query pattern that would block until we make it unblock. + // ExecuteFetchMulti will execute each statement separately, so we need to add SET query. + fakeDb.AddQueryPattern("SET SESSION lock_wait_timeout=.*", &sqltypes.Result{}) ch := make(chan int) fakeDb.AddQueryPatternWithCallback("^INSERT INTO.*", sqltypes.MakeTestResult(nil), func(s string) { <-ch }) // Add a fake query that makes the semi-sync monitor believe that the tablet is blocked on semi-sync ACKs. 
- fakeDb.AddQuery("select variable_value from performance_schema.global_status where regexp_like(variable_name, 'Rpl_semi_sync_(source|master)_wait_sessions')", sqltypes.MakeTestResult(sqltypes.MakeTestFields("Variable_value", "varchar"), "1")) + fakeDb.AddQuery("SELECT /*+ MAX_EXECUTION_TIME(500) */ variable_name, variable_value FROM performance_schema.global_status WHERE REGEXP_LIKE(variable_name, 'Rpl_semi_sync_(source|master)_(wait_sessions|yes_tx)')", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|1", + "Rpl_semi_sync_source_yes_tx|5")) // Verify that in the beginning the tablet is serving. require.True(t, tm.QueryServiceControl.IsServing()) @@ -143,7 +149,7 @@ func TestDemotePrimaryWaitingForSemiSyncUnblock(t *testing.T) { // Start the demote primary operation in a go routine. var demotePrimaryFinished atomic.Bool go func() { - _, err := tm.demotePrimary(ctx, false) + _, err := tm.demotePrimary(ctx, false /* revertPartialFailure */, false /* force */) require.NoError(t, err) demotePrimaryFinished.Store(true) }() @@ -160,7 +166,10 @@ func TestDemotePrimaryWaitingForSemiSyncUnblock(t *testing.T) { require.False(t, fakeMysqlDaemon.SuperReadOnly.Load()) // Now we unblock the semi-sync monitor. 
- fakeDb.AddQuery("select variable_value from performance_schema.global_status where regexp_like(variable_name, 'Rpl_semi_sync_(source|master)_wait_sessions')", sqltypes.MakeTestResult(sqltypes.MakeTestFields("Variable_value", "varchar"), "0")) + fakeDb.AddQuery("SELECT /*+ MAX_EXECUTION_TIME(1000) */ variable_name, variable_value FROM performance_schema.global_status WHERE REGEXP_LIKE(variable_name, 'Rpl_semi_sync_(source|master)_(wait_sessions|yes_tx)')", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|0", + "Rpl_semi_sync_source_yes_tx|5")) close(ch) // This should unblock the demote primary operation eventually. @@ -171,6 +180,123 @@ func TestDemotePrimaryWaitingForSemiSyncUnblock(t *testing.T) { require.True(t, fakeMysqlDaemon.SuperReadOnly.Load()) } +// TestDemotePrimaryWithSemiSyncProgressDetection tests that demote primary proceeds +// without blocking when transactions are making progress (ackedTrxs increasing between checks). +func TestDemotePrimaryWithSemiSyncProgressDetection(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") + tm := newTestTM(t, ts, 1, "ks", "0", nil) + // Make the tablet a primary. + err := tm.ChangeType(ctx, topodatapb.TabletType_PRIMARY, false) + require.NoError(t, err) + fakeMysqlDaemon := tm.MysqlDaemon.(*mysqlctl.FakeMysqlDaemon) + fakeDb := fakeMysqlDaemon.DB() + fakeDb.SetNeverFail(true) + + tm.SemiSyncMonitor.Open() + + // Set up the query to show waiting sessions, but with progress (ackedTrxs increasing). + // The monitor makes TWO calls to getSemiSyncStats with a sleep between them. + // We add the query result multiple times. The fakesqldb will return them in order (FIFO). + // First few calls: waiting sessions present, ackedTrxs=5. 
+ for range 3 { + fakeDb.AddQuery("SELECT /*+ MAX_EXECUTION_TIME(1000) */ variable_name, variable_value FROM performance_schema.global_status WHERE REGEXP_LIKE(variable_name, 'Rpl_semi_sync_(source|master)_(wait_sessions|yes_tx)')", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|1", + "Rpl_semi_sync_source_yes_tx|5")) + } + // Next calls: waiting sessions present, but ackedTrxs=6 (progress!). + for range 10 { + fakeDb.AddQuery("SELECT /*+ MAX_EXECUTION_TIME(1000) */ variable_name, variable_value FROM performance_schema.global_status WHERE REGEXP_LIKE(variable_name, 'Rpl_semi_sync_(source|master)_(wait_sessions|yes_tx)')", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|1", + "Rpl_semi_sync_source_yes_tx|6")) + } + + // Verify that in the beginning the tablet is serving. + require.True(t, tm.QueryServiceControl.IsServing()) + + // Start the demote primary operation in a go routine. + var demotePrimaryFinished atomic.Bool + go func() { + _, err := tm.demotePrimary(ctx, false /* revertPartialFailure */, false /* force */) + require.NoError(t, err) + demotePrimaryFinished.Store(true) + }() + + // Wait for the demote primary operation to have changed the serving state. + require.Eventually(t, func() bool { + return !tm.QueryServiceControl.IsServing() + }, 5*time.Second, 100*time.Millisecond) + + // DemotePrimary should finish quickly because progress is being made. + // It should NOT wait for semi-sync to unblock since ackedTrxs is increasing. + require.Eventually(t, func() bool { + return demotePrimaryFinished.Load() + }, 5*time.Second, 100*time.Millisecond) + + // We should have seen the super-read only query. 
+ require.True(t, fakeMysqlDaemon.SuperReadOnly.Load()) +} + +// TestDemotePrimaryWhenSemiSyncBecomesUnblockedBetweenChecks tests that demote primary +// proceeds immediately when waiting sessions drops to 0 between the two checks. +func TestDemotePrimaryWhenSemiSyncBecomesUnblockedBetweenChecks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") + tm := newTestTM(t, ts, 1, "ks", "0", nil) + // Make the tablet a primary. + err := tm.ChangeType(ctx, topodatapb.TabletType_PRIMARY, false) + require.NoError(t, err) + fakeMysqlDaemon := tm.MysqlDaemon.(*mysqlctl.FakeMysqlDaemon) + fakeDb := fakeMysqlDaemon.DB() + fakeDb.SetNeverFail(true) + + tm.SemiSyncMonitor.Open() + + // Set up the query to show waiting sessions on first call, but 0 on second call. + // This simulates the semi-sync becoming unblocked between the two checks. + // The fakesqldb returns results in FIFO order. + // First call: waiting sessions present. + fakeDb.AddQuery("SELECT /*+ MAX_EXECUTION_TIME(1000) */ variable_name, variable_value FROM performance_schema.global_status WHERE REGEXP_LIKE(variable_name, 'Rpl_semi_sync_(source|master)_(wait_sessions|yes_tx)')", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|2", + "Rpl_semi_sync_source_yes_tx|5")) + // Second and subsequent calls: no waiting sessions (unblocked!). + for range 10 { + fakeDb.AddQuery("SELECT /*+ MAX_EXECUTION_TIME(1000) */ variable_name, variable_value FROM performance_schema.global_status WHERE REGEXP_LIKE(variable_name, 'Rpl_semi_sync_(source|master)_(wait_sessions|yes_tx)')", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|0", + "Rpl_semi_sync_source_yes_tx|5")) + } + + // Verify that in the beginning the tablet is serving. 
+ require.True(t, tm.QueryServiceControl.IsServing()) + + // Start the demote primary operation in a go routine. + var demotePrimaryFinished atomic.Bool + go func() { + _, err := tm.demotePrimary(ctx, false /* revertPartialFailure */, false /* force */) + require.NoError(t, err) + demotePrimaryFinished.Store(true) + }() + + // Wait for the demote primary operation to have changed the serving state. + require.Eventually(t, func() bool { + return !tm.QueryServiceControl.IsServing() + }, 5*time.Second, 100*time.Millisecond) + + // DemotePrimary should finish quickly because semi-sync became unblocked. + require.Eventually(t, func() bool { + return demotePrimaryFinished.Load() + }, 5*time.Second, 100*time.Millisecond) + + // We should have seen the super-read only query. + require.True(t, fakeMysqlDaemon.SuperReadOnly.Load()) +} + // TestUndoDemotePrimaryStateChange tests that UndoDemotePrimary // if able to change the state of the tablet to Primary if there // is a mismatch with the tablet record. @@ -188,7 +314,7 @@ func TestUndoDemotePrimaryStateChange(t *testing.T) { // Check that the tablet is initially a replica. require.EqualValues(t, topodatapb.TabletType_REPLICA, tm.Tablet().Type) - // Verify that the tablet record says the tablet should be a primary + // Verify that the tablet record says the tablet should be a primary. 
require.EqualValues(t, topodatapb.TabletType_PRIMARY, ti.Type) err = tm.UndoDemotePrimary(ctx, false) diff --git a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go index 8f96a3bf0cd..13cad547a50 100644 --- a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go +++ b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "sync" + "sync/atomic" "time" "vitess.io/vitess/go/constants/sidecar" @@ -30,25 +31,36 @@ import ( "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) const ( - semiSyncWaitSessionsRead = "select variable_value from performance_schema.global_status where regexp_like(variable_name, 'Rpl_semi_sync_(source|master)_wait_sessions')" - semiSyncHeartbeatWrite = "INSERT INTO %s.semisync_heartbeat (ts) VALUES (NOW())" - semiSyncHeartbeatClear = "TRUNCATE TABLE %s.semisync_heartbeat" - maxWritesPermitted = 15 - clearTimerDuration = 24 * time.Hour + // How many seconds we should wait for table/metadata locks. + // We do NOT want our TRUNCATE statement to block things indefinitely, and + // we do NOT want our INSERTs blocking indefinitely on any locks to appear + // as though they are blocking on a semi-sync ACK, which is what we really + // care about in the monitor as when we hit the limit of writers blocked + // on semi-sync ACKs we signal to VTOrc that we need help to unblock + // things and it will perform an ERS to do so. + // Note: this is something we are entirely fine being set in all of the + // monitor connection pool sessions, so we do not ever bother to set the + // session value back to the global default. 
+ setLockWaitTimeoutQuery = "SET SESSION lock_wait_timeout=%d" + + semiSyncStatsQuery = "SELECT /*+ MAX_EXECUTION_TIME(%d) */ variable_name, variable_value FROM performance_schema.global_status WHERE REGEXP_LIKE(variable_name, 'Rpl_semi_sync_(source|master)_(wait_sessions|yes_tx)')" + semiSyncHeartbeatWrite = "INSERT INTO %s.semisync_heartbeat (ts) VALUES (NOW())" + semiSyncHeartbeatClear = "TRUNCATE TABLE %s.semisync_heartbeat" + maxWritesPermitted = 15 + clearTimerDuration = 24 * time.Hour ) -var ( - // waitBetweenWrites is the time to wait between consecutive writes. - // This is a variable instead of a constant only to be tweaked in tests. - waitBetweenWrites = 1 * time.Second -) +type semiSyncStats struct { + waitingSessions, ackedTrxs int64 +} // Monitor is a monitor that checks if the primary tablet // is blocked on a semi-sync ack from the replica. @@ -77,7 +89,7 @@ type Monitor struct { // isWriting stores if the monitor is currently writing to the DB. // We don't want two different threads initiating writes, so we use this // for synchronization. - isWriting bool + isWriting atomic.Bool // inProgressWriteCount is the number of writes currently in progress. // The writes from the monitor themselves might get blocked and hence a count for them is required. // After enough writes are blocked, we want to notify VTOrc to run an ERS. @@ -91,6 +103,11 @@ type Monitor struct { // errorCount is the number of errors that the semi-sync monitor ran into. // We ignore some of the errors, so the counter is a good way to track how many errors we have seen. errorCount *stats.Counter + + // actionDelay is the time to wait between various actions. + actionDelay time.Duration + // actionTimeout is when we should time out a given action. + actionTimeout time.Duration } // NewMonitor creates a new Monitor. 
@@ -105,6 +122,8 @@ func NewMonitor(config *tabletenv.TabletConfig, exporter *servenv.Exporter) *Mon errorCount: exporter.NewCounter("SemiSyncMonitorErrorCount", "Number of errors encountered by the semi-sync monitor"), appPool: dbconnpool.NewConnectionPool("SemiSyncMonitorAppPool", exporter, maxWritesPermitted+5, mysqlctl.DbaIdleTimeout, 0, mysqlctl.PoolDynamicHostnameResolution), waiters: make([]chan struct{}, 0), + actionDelay: config.SemiSyncMonitor.Interval / 10, + actionTimeout: config.SemiSyncMonitor.Interval / 2, } } @@ -174,10 +193,8 @@ func (m *Monitor) Close() { // and manufactures a write to unblock the primary. This function is safe to // be called multiple times in parallel. func (m *Monitor) checkAndFixSemiSyncBlocked() { - // Check if semi-sync is blocked or not - ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer cancel() - isBlocked, err := m.isSemiSyncBlocked(ctx) + // Check if semi-sync is blocked or not. + isBlocked, err := m.isSemiSyncBlocked() if err != nil { m.errorCount.Add(1) // If we are unable to determine whether the primary is blocked or not, @@ -197,7 +214,9 @@ func (m *Monitor) checkAndFixSemiSyncBlocked() { } // isSemiSyncBlocked checks if the primary is blocked on semi-sync. -func (m *Monitor) isSemiSyncBlocked(ctx context.Context) (bool, error) { +func (m *Monitor) isSemiSyncBlocked() (bool, error) { + ctx, cancel := context.WithTimeout(context.Background(), m.ticks.Interval()) + defer cancel() // Get a connection from the pool conn, err := m.appPool.Get(ctx) if err != nil { @@ -205,23 +224,16 @@ func (m *Monitor) isSemiSyncBlocked(ctx context.Context) (bool, error) { } defer conn.Recycle() - // Execute the query to check if the primary is blocked on semi-sync. 
- res, err := conn.Conn.ExecuteFetch(semiSyncWaitSessionsRead, 1, false) - if err != nil { + stats, err := m.getSemiSyncStats(conn) + if err != nil || stats.waitingSessions == 0 { return false, err } - // If we have no rows, then the primary doesn't have semi-sync enabled. - // It then follows, that the primary isn't blocked :) - if len(res.Rows) == 0 { - return false, nil - } - - // Read the status value and check if it is non-zero. - if len(res.Rows) != 1 || len(res.Rows[0]) != 1 { - return false, fmt.Errorf("unexpected number of rows received - %v", res.Rows) + time.Sleep(m.actionDelay) + followUpStats, err := m.getSemiSyncStats(conn) + if err != nil || followUpStats.waitingSessions == 0 || followUpStats.ackedTrxs > stats.ackedTrxs { + return false, err } - value, err := res.Rows[0][0].ToCastInt64() - return value != 0, err + return true, nil } // isClosed returns if the monitor is currently closed or not. @@ -272,25 +284,19 @@ func (m *Monitor) stillBlocked() bool { // checkAndSetIsWriting checks if the monitor is already writing to the DB. // If it is not, then it sets the isWriting field and signals the caller. func (m *Monitor) checkAndSetIsWriting() bool { - m.mu.Lock() - defer m.mu.Unlock() - if m.isWriting { - return false - } - m.isWriting = true - return true + return m.isWriting.CompareAndSwap(false, true) } // clearIsWriting clears the isWriting field. func (m *Monitor) clearIsWriting() { - m.mu.Lock() - defer m.mu.Unlock() - m.isWriting = false + m.isWriting.Store(false) } // startWrites starts writing to the DB. // It is re-entrant and will return if we are already writing. func (m *Monitor) startWrites() { + ctx, cancel := context.WithTimeout(context.Background(), m.ticks.Interval()) + defer cancel() // If we are already writing, then we can just return. if !m.checkAndSetIsWriting() { return @@ -300,13 +306,27 @@ func (m *Monitor) startWrites() { // Check if we need to continue writing or not. 
for m.stillBlocked() { + select { + case <-ctx.Done(): + return + default: + // We only need to do another write if there were no other successful + // writes and we're indeed still blocked. + blocked, err := m.isSemiSyncBlocked() + if err != nil { + return + } + if !blocked { + m.setIsBlocked(false) + return + } + } // We do the writes in a go-routine because if the network disruption // is somewhat long-lived, then the writes themselves can also block. // By doing them in a go-routine we give the system more time to recover while // exponentially backing off. We will not do more than maxWritesPermitted writes and once // all maxWritesPermitted writes are blocked, we'll wait for VTOrc to run an ERS. go m.write() - time.Sleep(waitBetweenWrites) } } @@ -330,7 +350,7 @@ func (m *Monitor) incrementWriteCount() bool { func (m *Monitor) AllWritesBlocked() bool { m.mu.Lock() defer m.mu.Unlock() - return m.isOpen && m.inProgressWriteCount == maxWritesPermitted + return m.isOpen && m.isBlocked && m.inProgressWriteCount == maxWritesPermitted } // decrementWriteCount decrements the write count. @@ -343,13 +363,12 @@ func (m *Monitor) decrementWriteCount() { // write writes a heartbeat to unblock semi-sync being stuck. 
func (m *Monitor) write() { - shouldWrite := m.incrementWriteCount() - if !shouldWrite { + if shouldWrite := m.incrementWriteCount(); !shouldWrite { return } defer m.decrementWriteCount() // Get a connection from the pool - ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + ctx, cancel := context.WithTimeout(context.Background(), m.actionTimeout) defer cancel() conn, err := m.appPool.Get(ctx) if err != nil { @@ -357,7 +376,7 @@ func (m *Monitor) write() { log.Errorf("SemiSync Monitor: failed to get a connection when writing to semisync_heartbeat table: %v", err) return } - _, err = conn.Conn.ExecuteFetch(m.bindSideCarDBName(semiSyncHeartbeatWrite), 0, false) + err = conn.Conn.ExecuteFetchMultiDrain(m.addLockWaitTimeout(m.bindSideCarDBName(semiSyncHeartbeatWrite))) conn.Recycle() if err != nil { m.errorCount.Add(1) @@ -388,14 +407,16 @@ func (m *Monitor) setIsBlocked(val bool) { // consumes too much space on the MySQL instance. func (m *Monitor) clearAllData() { // Get a connection from the pool - conn, err := m.appPool.Get(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), m.actionTimeout) + defer cancel() + conn, err := m.appPool.Get(ctx) if err != nil { m.errorCount.Add(1) log.Errorf("SemiSync Monitor: failed get a connection to clear semisync_heartbeat table: %v", err) return } defer conn.Recycle() - _, err = conn.Conn.ExecuteFetch(m.bindSideCarDBName(semiSyncHeartbeatClear), 0, false) + _, _, err = conn.Conn.ExecuteFetchMulti(m.addLockWaitTimeout(m.bindSideCarDBName(semiSyncHeartbeatClear)), 0, false) if err != nil { m.errorCount.Add(1) log.Errorf("SemiSync Monitor: failed to clear semisync_heartbeat table: %v", err) @@ -416,3 +437,47 @@ func (m *Monitor) addWaiter() chan struct{} { func (m *Monitor) bindSideCarDBName(query string) string { return sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier()).Query } + +func (m *Monitor) addLockWaitTimeout(query string) string { + timeoutQuery := 
fmt.Sprintf(setLockWaitTimeoutQuery, int(m.actionTimeout.Seconds())) + return timeoutQuery + ";" + query +} + +func (m *Monitor) getSemiSyncStats(conn *dbconnpool.PooledDBConnection) (semiSyncStats, error) { + stats := semiSyncStats{} + // Execute the query to check if the primary is blocked on semi-sync. + query := fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()) + res, err := conn.Conn.ExecuteFetch(query, 2, false) + if err != nil { + return stats, err + } + // If we have no rows, then the primary doesn't have semi-sync enabled. + // It then follows, that the primary isn't blocked :) + if len(res.Rows) == 0 { + return stats, nil + } + + // Read the status value and check if it is non-zero. + if len(res.Rows) != 2 { + return stats, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected number of rows received, expected 2 but got %d, for semi-sync stats query %s", len(res.Rows), query) + } + if len(res.Rows[0]) != 2 { + return stats, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected number of columns received, expected 2 but got %d, for semi-sync stats query %s", len(res.Rows[0]), query) + } + for i := range len(res.Rows) { + name := res.Rows[i][0].ToString() + value, err := res.Rows[i][1].ToCastInt64() + if err != nil { + return stats, vterrors.Wrapf(err, "unexpected results for semi-sync stats query %s: %v", query, res.Rows) + } + switch name { + case "Rpl_semi_sync_master_wait_sessions", "Rpl_semi_sync_source_wait_sessions": + stats.waitingSessions = value + case "Rpl_semi_sync_master_yes_tx", "Rpl_semi_sync_source_yes_tx": + stats.ackedTrxs = value + default: + return stats, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected results for semi-sync stats query %s: %v", query, res.Rows) + } + } + return stats, nil +} diff --git a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go index 3fdb1f1137e..2159bd3cb91 100644 --- a/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go +++ 
b/go/vt/vttablet/tabletmanager/semisyncmonitor/monitor_test.go @@ -18,6 +18,7 @@ package semisyncmonitor import ( "context" + "fmt" "runtime" "strconv" "sync" @@ -27,6 +28,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" @@ -61,56 +63,339 @@ func createFakeDBAndMonitor(t *testing.T) (*fakesqldb.DB, *Monitor) { } // TestMonitorIsSemiSyncBlocked tests the functionality of isSemiSyncBlocked. +// NOTE: This test focuses on the first getSemiSyncStats call and early-return logic. +// The full two-call behavior is tested in TestMonitorIsSemiSyncBlockedProgressDetection. func TestMonitorIsSemiSyncBlocked(t *testing.T) { defer utils.EnsureNoLeaks(t) + tests := []struct { name string - res *sqltypes.Result + result *sqltypes.Result want bool wantErr string }{ { - name: "no rows", - res: &sqltypes.Result{}, - want: false, + name: "no rows - semi-sync not enabled", + result: &sqltypes.Result{}, + want: false, }, { - name: "incorrect number of rows", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "1", "1"), - wantErr: "Row count exceeded 1", + name: "incorrect results - invalid variable names", + result: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "foo|3", "foo|3"), + wantErr: "unexpected results for semi-sync stats query", }, { - name: "incorrect number of fields", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value|a", "varchar|varchar"), "1|2"), - wantErr: `unexpected number of rows received - [[VARCHAR("1") VARCHAR("2")]]`, + name: "unblocked - zero waiting sessions", + result: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "Rpl_semi_sync_source_wait_sessions|0", "Rpl_semi_sync_source_yes_tx|1"), + want: false, }, { - name: "Unblocked", - res: 
sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0"), - want: false, + name: "has waiting sessions - needs second check", + result: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "Rpl_semi_sync_source_wait_sessions|1", "Rpl_semi_sync_source_yes_tx|5"), + // With fakesqldb limitation, second call returns same result, so it appears blocked. + want: true, }, { - name: "Blocked", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "1"), + name: "master prefix for backwards compatibility", + result: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "Rpl_semi_sync_master_wait_sessions|2", "Rpl_semi_sync_master_yes_tx|50"), + // With fakesqldb limitation, second call returns same result, so it appears blocked. want: true, }, + { + name: "invalid variable value", + result: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "Rpl_semi_sync_source_wait_sessions|not_a_number", "Rpl_semi_sync_source_yes_tx|5"), + wantErr: "unexpected results for semi-sync stats query", + }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db, m := createFakeDBAndMonitor(t) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second defer db.Close() defer func() { m.Close() waitUntilWritingStopped(t, m) }() - db.AddQuery(semiSyncWaitSessionsRead, tt.res) - got, err := m.isSemiSyncBlocked(context.Background()) + + db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), tt.result) + + got, err := m.isSemiSyncBlocked() if tt.wantErr != "" { - require.EqualError(t, err, tt.wantErr) + require.ErrorContains(t, err, tt.wantErr) return } require.NoError(t, err) - require.EqualValues(t, tt.want, got) + require.Equal(t, tt.want, got) + }) + } +} + +// TestMonitorIsSemiSyncBlockedConnectionError tests that we do not +// consider semi-sync 
blocked when we encounter an error trying to check. +func TestMonitorIsSemiSyncBlockedConnectionError(t *testing.T) { + defer utils.EnsureNoLeaks(t) + db, m := createFakeDBAndMonitor(t) + defer db.Close() + + // Close the pool to simulate connection errors. + m.mu.Lock() + m.appPool.Close() + m.mu.Unlock() + + defer func() { + m.Close() + waitUntilWritingStopped(t, m) + }() + + // The function should return an error when it can't get a connection. + got, err := m.isSemiSyncBlocked() + require.Error(t, err) + require.False(t, got) +} + +// TestMonitorIsSemiSyncBlockedWithBadResults tests error handling when +// the query returns an unexpected result. +func TestMonitorIsSemiSyncBlockedWithBadResults(t *testing.T) { + defer utils.EnsureNoLeaks(t) + + tests := []struct { + name string + res *sqltypes.Result + wantErr string + }{ + { + name: "one row instead of two", + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "Rpl_semi_sync_source_wait_sessions|1"), + wantErr: "unexpected number of rows received, expected 2 but got 1", + }, + { + name: "one column instead of two", + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "1", "1"), + wantErr: "unexpected number of columns received, expected 2 but got 1", + }, + { + name: "three rows instead of two", + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "Rpl_semi_sync_source_wait_sessions|1", "Rpl_semi_sync_source_yes_tx|5", "extra_row|10"), + // Note: The actual error is "Row count exceeded" because ExecuteFetch has maxrows=2. 
+ wantErr: "Row count exceeded", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, m := createFakeDBAndMonitor(t) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second + defer db.Close() + defer func() { + m.Close() + waitUntilWritingStopped(t, m) + }() + + db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), tt.res) + + got, err := m.isSemiSyncBlocked() + require.False(t, got) + require.ErrorContains(t, err, tt.wantErr) + }) + } +} + +// TestMonitorIsSemiSyncBlockedProgressDetection tests various scenarios +// for detecting progress in semi-sync replication by directly calling +// getSemiSyncStats to verify the logic. +func TestMonitorIsSemiSyncBlockedProgressDetection(t *testing.T) { + defer utils.EnsureNoLeaks(t) + + tests := []struct { + name string + firstWaiting int64 + firstAcked int64 + secondWaiting int64 + secondAcked int64 + expectedBlocked bool + description string + }{ + { + name: "progress - acked increased by 1", + firstWaiting: 2, + firstAcked: 100, + secondWaiting: 2, + secondAcked: 101, + expectedBlocked: false, + description: "should detect progress when acked transactions increase", + }, + { + name: "progress - acked increased significantly", + firstWaiting: 1, + firstAcked: 50, + secondWaiting: 1, + secondAcked: 1000, + expectedBlocked: false, + description: "should detect progress with large acked transaction increase", + }, + { + name: "progress - waiting decreased to zero", + firstWaiting: 3, + firstAcked: 100, + secondWaiting: 0, + secondAcked: 100, + expectedBlocked: false, + description: "should detect progress when waiting sessions drop to zero", + }, + { + name: "blocked - waiting decreased but transactions not progressing", + firstWaiting: 5, + firstAcked: 100, + secondWaiting: 2, + secondAcked: 100, + expectedBlocked: true, + description: "should still be blocked when waiting sessions decrease but no transactions are acked", + }, + { + name: "blocked - no 
change in metrics", + firstWaiting: 2, + firstAcked: 100, + secondWaiting: 2, + secondAcked: 100, + expectedBlocked: true, + description: "should detect blocked state when no metrics change", + }, + { + name: "blocked - waiting increased", + firstWaiting: 1, + firstAcked: 100, + secondWaiting: 3, + secondAcked: 100, + expectedBlocked: true, + description: "should detect blocked state when waiting sessions increase", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Directly test the logic by simulating what isSemiSyncBlocked does. + // First call - check initial stats. + stats := semiSyncStats{ + waitingSessions: tt.firstWaiting, + ackedTrxs: tt.firstAcked, + } + + // Early return conditions from isSemiSyncBlocked. + if stats.waitingSessions == 0 { + require.False(t, tt.expectedBlocked, tt.description) + return + } + + // Second call - check follow-up stats. + followUpStats := semiSyncStats{ + waitingSessions: tt.secondWaiting, + ackedTrxs: tt.secondAcked, + } + + // Check if we're still blocked based on the actual logic in isSemiSyncBlocked. + // Returns false (not blocked) if: waitingSessions == 0 OR ackedTrxs increased. + isBlocked := !(followUpStats.waitingSessions == 0 || followUpStats.ackedTrxs > stats.ackedTrxs) + + require.Equal(t, tt.expectedBlocked, isBlocked, tt.description) + }) + } +} + +// TestGetSemiSyncStats tests the getSemiSyncStats helper function. 
+func TestGetSemiSyncStats(t *testing.T) { + defer utils.EnsureNoLeaks(t) + + tests := []struct { + name string + res *sqltypes.Result + expectedWaiting int64 + expectedAcked int64 + wantErr string + }{ + { + name: "valid source prefix", + res: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|3", + "Rpl_semi_sync_source_yes_tx|150"), + expectedWaiting: 3, + expectedAcked: 150, + }, + { + name: "valid master prefix", + res: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_master_wait_sessions|5", + "Rpl_semi_sync_master_yes_tx|200"), + expectedWaiting: 5, + expectedAcked: 200, + }, + { + name: "zero values", + res: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|0", + "Rpl_semi_sync_source_yes_tx|0"), + expectedWaiting: 0, + expectedAcked: 0, + }, + { + name: "large values", + res: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|999999", + "Rpl_semi_sync_source_yes_tx|123456789"), + expectedWaiting: 999999, + expectedAcked: 123456789, + }, + { + name: "no rows returns empty stats", + res: &sqltypes.Result{}, + expectedWaiting: 0, + expectedAcked: 0, + }, + { + name: "wrong number of rows", + res: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|3"), + wantErr: "unexpected number of rows received, expected 2 but got 1", + }, + { + name: "invalid variable name", + res: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "invalid_variable|3", + "Rpl_semi_sync_source_yes_tx|150"), + wantErr: "unexpected results for semi-sync stats query", + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + db, m := createFakeDBAndMonitor(t) + defer db.Close() + defer func() { + m.Close() + waitUntilWritingStopped(t, m) + }() + + db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), tt.res) + conn, err := m.appPool.Get(context.Background()) + require.NoError(t, err) + defer conn.Recycle() + + stats, err := m.getSemiSyncStats(conn) + if tt.wantErr != "" { + require.ErrorContains(t, err, tt.wantErr) + return + } + require.NoError(t, err) + require.Equal(t, tt.expectedWaiting, stats.waitingSessions) + require.Equal(t, tt.expectedAcked, stats.ackedTrxs) }) } } @@ -147,9 +432,13 @@ func TestMonitorClearAllData(t *testing.T) { waitUntilWritingStopped(t, m) }() db.SetNeverFail(true) + // ExecuteFetchMulti will execute each statement separately, so we need to add both queries. + db.AddQuery("SET SESSION lock_wait_timeout=5", &sqltypes.Result{}) + db.AddQuery("truncate table _vt.semisync_heartbeat", &sqltypes.Result{}) m.clearAllData() ql := db.QueryLog() - require.EqualValues(t, "truncate table _vt.semisync_heartbeat", ql) + require.Contains(t, ql, "set session lock_wait_timeout=5") + require.Contains(t, ql, "truncate table _vt.semisync_heartbeat") } // TestMonitorWaitMechanism tests that the wait mechanism works as intended. 
@@ -299,6 +588,9 @@ func TestMonitorAllWritesBlocked(t *testing.T) { }() m.mu.Lock() m.inProgressWriteCount = tt.initVal + if m.inProgressWriteCount == tt.initVal { + m.isBlocked = true + } m.mu.Unlock() require.EqualValues(t, tt.expected, m.AllWritesBlocked()) }) @@ -308,21 +600,21 @@ func TestMonitorAllWritesBlocked(t *testing.T) { func TestMonitorWrite(t *testing.T) { defer utils.EnsureNoLeaks(t) tests := []struct { - initVal int - queryLog string + initVal int + shouldWrite bool }{ { - initVal: maxWritesPermitted - 2, - queryLog: "insert into _vt.semisync_heartbeat (ts) values (now())", + initVal: maxWritesPermitted - 2, + shouldWrite: true, }, { - initVal: maxWritesPermitted - 1, - queryLog: "insert into _vt.semisync_heartbeat (ts) values (now())", + initVal: maxWritesPermitted - 1, + shouldWrite: true, }, { - initVal: maxWritesPermitted, - queryLog: "", + initVal: maxWritesPermitted, + shouldWrite: false, }, { - initVal: 0, - queryLog: "insert into _vt.semisync_heartbeat (ts) values (now())", + initVal: 0, + shouldWrite: true, }, } for _, tt := range tests { @@ -334,6 +626,9 @@ func TestMonitorWrite(t *testing.T) { waitUntilWritingStopped(t, m) }() db.SetNeverFail(true) + // ExecuteFetchMulti will execute each statement separately, so we need to add both queries. 
+ db.AddQuery("SET SESSION lock_wait_timeout=5", &sqltypes.Result{}) + db.AddQuery("insert into _vt.semisync_heartbeat (ts) values (now())", &sqltypes.Result{}) m.mu.Lock() m.inProgressWriteCount = tt.initVal m.writesBlockedGauge.Set(int64(tt.initVal)) @@ -343,7 +638,13 @@ func TestMonitorWrite(t *testing.T) { require.EqualValues(t, tt.initVal, m.inProgressWriteCount) require.EqualValues(t, tt.initVal, m.writesBlockedGauge.Get()) m.mu.Unlock() - require.EqualValues(t, tt.queryLog, db.QueryLog()) + queryLog := db.QueryLog() + if tt.shouldWrite { + require.Contains(t, queryLog, "set session lock_wait_timeout=5") + require.Contains(t, queryLog, "insert into _vt.semisync_heartbeat (ts) values (now())") + } else { + require.Equal(t, "", queryLog) + } }) } } @@ -351,12 +652,9 @@ func TestMonitorWrite(t *testing.T) { // TestMonitorWriteBlocked tests the write function when the writes are blocked. func TestMonitorWriteBlocked(t *testing.T) { defer utils.EnsureNoLeaks(t) - initialVal := waitBetweenWrites - waitBetweenWrites = 250 * time.Millisecond - defer func() { - waitBetweenWrites = initialVal - }() db, m := createFakeDBAndMonitor(t) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second defer db.Close() defer func() { m.Close() @@ -368,11 +666,12 @@ func TestMonitorWriteBlocked(t *testing.T) { require.EqualValues(t, 0, m.inProgressWriteCount) m.mu.Unlock() - // Add a universal insert query pattern that would block until we make it unblock. - ch := make(chan int) - db.AddQueryPatternWithCallback("^INSERT INTO.*", sqltypes.MakeTestResult(nil), func(s string) { - <-ch - }) + // ExecuteFetchMulti will execute each statement separately, so we need to add SET query and INSERT query. + // Add them multiple times so the writes can execute. 
+ for range maxWritesPermitted { + db.AddQuery("SET SESSION lock_wait_timeout=1", &sqltypes.Result{}) + db.AddQuery("INSERT INTO _vt.semisync_heartbeat (ts) VALUES (NOW())", &sqltypes.Result{}) + } // Do a write, which we expect to block. var writeFinished atomic.Bool @@ -380,24 +679,24 @@ func TestMonitorWriteBlocked(t *testing.T) { m.write() writeFinished.Store(true) }() - // We should see the number of writes blocked to increase. - require.Eventually(t, func() bool { - m.mu.Lock() - defer m.mu.Unlock() - return m.inProgressWriteCount == 1 - }, 2*time.Second, 100*time.Millisecond) - // Once the writers are unblocked, we expect to see a zero value again. - close(ch) + // We should see the number of writers increase briefly, before it completes. + require.Zero(t, m.errorCount.Get()) require.Eventually(t, func() bool { m.mu.Lock() defer m.mu.Unlock() - return m.inProgressWriteCount == 0 - }, 2*time.Second, 100*time.Millisecond) + return m.inProgressWriteCount > 0 + }, 2*time.Second, 5*time.Microsecond) + // Check that the writes finished successfully. require.Eventually(t, func() bool { return writeFinished.Load() }, 2*time.Second, 100*time.Millisecond) + + // After write completes, count should be back to zero. + m.mu.Lock() + defer m.mu.Unlock() + require.EqualValues(t, 0, m.inProgressWriteCount) } // TestIsWriting checks the transitions for the isWriting field. @@ -412,55 +711,58 @@ func TestIsWriting(t *testing.T) { // Check the initial value of the isWriting field. m.mu.Lock() - require.False(t, m.isWriting) + require.False(t, m.isWriting.Load()) m.mu.Unlock() // Clearing a false field does nothing. m.clearIsWriting() m.mu.Lock() - require.False(t, m.isWriting) + require.False(t, m.isWriting.Load()) m.mu.Unlock() // Check and set should set the field. set := m.checkAndSetIsWriting() require.True(t, set) m.mu.Lock() - require.True(t, m.isWriting) + require.True(t, m.isWriting.Load()) m.mu.Unlock() // Checking and setting shouldn't do anything. 
set = m.checkAndSetIsWriting() require.False(t, set) m.mu.Lock() - require.True(t, m.isWriting) + require.True(t, m.isWriting.Load()) m.mu.Unlock() // Clearing should now make the field false. m.clearIsWriting() m.mu.Lock() - require.False(t, m.isWriting) + require.False(t, m.isWriting.Load()) m.mu.Unlock() } func TestStartWrites(t *testing.T) { defer utils.EnsureNoLeaks(t) - initialVal := waitBetweenWrites - waitBetweenWrites = 250 * time.Millisecond - defer func() { - waitBetweenWrites = initialVal - }() db, m := createFakeDBAndMonitor(t) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second defer db.Close() defer func() { m.Close() waitUntilWritingStopped(t, m) }() - // Add a universal insert query pattern that would block until we make it unblock. - ch := make(chan int) - db.AddQueryPatternWithCallback("^INSERT INTO.*", sqltypes.MakeTestResult(nil), func(s string) { - <-ch - }) + // Set up semi-sync stats query to return blocked state (waiting sessions > 0, no progress). + // This is what isSemiSyncBlocked will check inside startWrites. + db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|2", + "Rpl_semi_sync_source_yes_tx|100")) + + // ExecuteFetchMulti will execute each statement separately. + // Use patterns for both SET and INSERT since they can be called multiple times. + db.AddQuery("SET SESSION lock_wait_timeout=1", &sqltypes.Result{}) + db.AddQuery("INSERT INTO _vt.semisync_heartbeat (ts) VALUES (NOW())", &sqltypes.Result{}) // If we aren't blocked, then start writes doesn't do anything. m.startWrites() @@ -469,69 +771,53 @@ func TestStartWrites(t *testing.T) { // Now we set the monitor to be blocked. 
m.setIsBlocked(true) - var writesFinished atomic.Bool - go func() { - m.startWrites() - writesFinished.Store(true) - }() - - // We should see the number of writes blocked to increase. - require.Eventually(t, func() bool { - m.mu.Lock() - defer m.mu.Unlock() - return m.inProgressWriteCount >= 1 - }, 2*time.Second, 100*time.Millisecond) - - // Once the writes have started, another call to startWrites shouldn't do anything + // Start writes and wait for them to complete. m.startWrites() - // We should continue to see the number of writes blocked increase. + // Check that some writes are in progress. require.Eventually(t, func() bool { m.mu.Lock() defer m.mu.Unlock() - return m.inProgressWriteCount >= 2 - }, 2*time.Second, 100*time.Millisecond) + return m.inProgressWriteCount > 0 + }, 2*time.Second, 5*time.Microsecond) - // Check that the writes are still going. - require.False(t, writesFinished.Load()) + // Verify the query log shows the writes were executed. + queryLog := db.QueryLog() + require.Contains(t, queryLog, "insert into _vt.semisync_heartbeat") - // Make the monitor unblocked. This should stop the writes eventually. + // Make the monitor unblocked. This should stop the writes. m.setIsBlocked(false) - close(ch) - - require.Eventually(t, func() bool { - return writesFinished.Load() - }, 2*time.Second, 100*time.Millisecond) // Check that no writes are in progress anymore. 
require.Eventually(t, func() bool { m.mu.Lock() defer m.mu.Unlock() return m.inProgressWriteCount == 0 - }, 2*time.Second, 100*time.Millisecond) + }, 5*time.Second, 100*time.Millisecond) } func TestCheckAndFixSemiSyncBlocked(t *testing.T) { defer utils.EnsureNoLeaks(t) - initialVal := waitBetweenWrites - waitBetweenWrites = 250 * time.Millisecond - defer func() { - waitBetweenWrites = initialVal - }() db, m := createFakeDBAndMonitor(t) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second defer db.Close() defer func() { m.Close() waitUntilWritingStopped(t, m) }() - // Initially everything is unblocked. - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0")) - // Add a universal insert query pattern that would block until we make it unblock. - ch := make(chan int) - db.AddQueryPatternWithCallback("^INSERT INTO.*", sqltypes.MakeTestResult(nil), func(s string) { - <-ch - }) + db.SetNeverFail(true) + // Initially everything is unblocked (zero waiting sessions). + db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|0", + "Rpl_semi_sync_source_yes_tx|10")) + + // ExecuteFetchMulti will execute each statement separately. + // Use patterns for both SET and INSERT since they can be called multiple times. + db.AddQuery("SET SESSION lock_wait_timeout=1", &sqltypes.Result{}) + db.AddQuery("INSERT INTO _vt.semisync_heartbeat (ts) VALUES (NOW())", &sqltypes.Result{}) // Check that the monitor thinks we are unblocked. m.checkAndFixSemiSyncBlocked() @@ -539,74 +825,130 @@ func TestCheckAndFixSemiSyncBlocked(t *testing.T) { require.False(t, m.isBlocked) m.mu.Unlock() - // Now we set the monitor to be blocked. 
- db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "2")) - m.checkAndFixSemiSyncBlocked() + // Now we set the monitor to be blocked (waiting sessions > 0, no progress). + db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|2", + "Rpl_semi_sync_source_yes_tx|10")) - m.mu.Lock() - require.True(t, m.isBlocked) - m.mu.Unlock() + // Manually set isBlocked and start writes like the monitor would do. + m.setIsBlocked(true) - // Checking again shouldn't make a difference. - m.checkAndFixSemiSyncBlocked() - m.mu.Lock() - require.True(t, m.isBlocked) - m.mu.Unlock() + // Start writes and wait for them to complete. + m.startWrites() - // Meanwhile writes should have started and should be getting blocked. + // Wait a bit to let writes execute require.Eventually(t, func() bool { m.mu.Lock() defer m.mu.Unlock() - return m.inProgressWriteCount >= 2 - }, 2*time.Second, 100*time.Millisecond) + return m.inProgressWriteCount == 0 + }, 2*time.Second, 5*time.Microsecond) - // Now we set the monitor to be unblocked. - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0")) - close(ch) - m.checkAndFixSemiSyncBlocked() + // Verify the query log shows the writes were executed. + queryLog := db.QueryLog() + require.Contains(t, queryLog, "insert into _vt.semisync_heartbeat") - // We expect the writes to clear out and also the monitor should think its unblocked. - m.mu.Lock() - require.False(t, m.isBlocked) - m.mu.Unlock() + // Now we set the monitor to be unblocked (waiting sessions = 0). 
+ db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|0", + "Rpl_semi_sync_source_yes_tx|10")) + + // Make the monitor unblocked. This should stop the writes. + m.setIsBlocked(false) + + // Check that no writes are in progress anymore. require.Eventually(t, func() bool { m.mu.Lock() defer m.mu.Unlock() - return m.inProgressWriteCount == 0 && m.isWriting == false - }, 2*time.Second, 100*time.Millisecond) + return m.inProgressWriteCount == 0 + }, 10*time.Second, 100*time.Millisecond) +} + +// statefulQueryHandler is a custom query handler that can return different results +// based on an atomic boolean state. This allows tests to dynamically change query +// results without relying on precise query result counts. +type statefulQueryHandler struct { + db *fakesqldb.DB + semisyncBlocked atomic.Bool + blockedResult *sqltypes.Result + unblockedResult *sqltypes.Result + semiSyncStatsQuery string +} + +func (h *statefulQueryHandler) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) error { + if query == h.semiSyncStatsQuery { + if h.semisyncBlocked.Load() { + return callback(h.blockedResult) + } + return callback(h.unblockedResult) + } + // Fall back to default handler for all other queries (SET and INSERT patterns). 
+ return h.db.HandleQuery(c, query, callback) } func TestWaitUntilSemiSyncUnblocked(t *testing.T) { defer utils.EnsureNoLeaks(t) - initialVal := waitBetweenWrites - waitBetweenWrites = 250 * time.Millisecond - defer func() { - waitBetweenWrites = initialVal - }() - db, m := createFakeDBAndMonitor(t) + db := fakesqldb.New(t) defer db.Close() + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") + config := &tabletenv.TabletConfig{ + DB: dbc, + SemiSyncMonitor: tabletenv.SemiSyncMonitorConfig{ + Interval: 100 * time.Millisecond, + }, + } + m := NewMonitor(config, exporter) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second defer func() { m.Close() waitUntilWritingStopped(t, m) }() db.SetNeverFail(true) - // Initially everything is unblocked. - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0")) + + // Set up a custom query handler that returns different results based on state + handler := &statefulQueryHandler{ + db: db, + semiSyncStatsQuery: fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), + blockedResult: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|3", + "Rpl_semi_sync_source_yes_tx|3"), + unblockedResult: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|0", + "Rpl_semi_sync_source_yes_tx|0"), + } + handler.semisyncBlocked.Store(false) // Initially unblocked + db.Handler = handler + + // ExecuteFetchMulti will execute each statement separately + // Use patterns for both SET and INSERT since they can be called multiple times + db.AddQuery("SET SESSION lock_wait_timeout=1", &sqltypes.Result{}) + db.AddQuery("INSERT INTO _vt.semisync_heartbeat (ts) VALUES (NOW())", &sqltypes.Result{}) + + // Open the monitor so the periodic timer runs. 
+ m.Open() // When everything is unblocked, then this should return without blocking. err := m.WaitUntilSemiSyncUnblocked(context.Background()) require.NoError(t, err) - // Add a universal insert query pattern that would block until we make it unblock. - ch := make(chan int) - db.AddQueryPatternWithCallback("^INSERT INTO.*", sqltypes.MakeTestResult(nil), func(s string) { - <-ch - }) - // Now we set the monitor to be blocked. - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "3")) + // Now we set the monitor to be blocked by changing the state. + handler.semisyncBlocked.Store(true) + + // Wait until the writes have started. + require.Eventually(t, func() bool { + m.mu.Lock() + defer m.mu.Unlock() + // Check if we have any in-progress writes, which indicates writing has started. + return m.inProgressWriteCount > 0 || m.isWriting.Load() + }, 5*time.Second, 5*time.Microsecond) - // wg is used to keep track of all the go routines. wg := sync.WaitGroup{} // Start a cancellable context and use that to wait. ctx, cancel := context.WithCancel(context.Background()) @@ -623,19 +965,10 @@ func TestWaitUntilSemiSyncUnblocked(t *testing.T) { }() // Start another go routine, also waiting for semi-sync being unblocked, but not using the cancellable context. - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { err := m.WaitUntilSemiSyncUnblocked(context.Background()) require.NoError(t, err) - }() - - // Wait until the writes have started. - require.Eventually(t, func() bool { - m.mu.Lock() - defer m.mu.Unlock() - return m.isWriting - }, 2*time.Second, 100*time.Millisecond) + }) // Now we cancel the context. This should fail the first wait. cancel() @@ -649,9 +982,9 @@ func TestWaitUntilSemiSyncUnblocked(t *testing.T) { require.EqualError(t, ctxErr, "context canceled") mu.Unlock() - // Now we set the monitor to be unblocked. 
- db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0")) - close(ch) + // Now we set the monitor to be unblocked by changing the state + handler.semisyncBlocked.Store(false) + err = m.WaitUntilSemiSyncUnblocked(context.Background()) require.NoError(t, err) // This should unblock the second wait. @@ -660,7 +993,7 @@ func TestWaitUntilSemiSyncUnblocked(t *testing.T) { require.Eventually(t, func() bool { m.mu.Lock() defer m.mu.Unlock() - return !m.isWriting + return !m.isWriting.Load() }, 2*time.Second, 100*time.Millisecond) // Also verify that if the monitor is closed, we don't wait. @@ -688,10 +1021,12 @@ func TestDeadlockOnClose(t *testing.T) { }, } m := NewMonitor(config, exporter) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second // Set up for semisync to be blocked db.SetNeverFail(true) - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "1")) + db.AddQuery(fmt.Sprintf(semiSyncStatsQuery, m.actionTimeout.Milliseconds()), sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), "Rpl_semi_sync_source_wait_sessions|1", "Rpl_semi_sync_source_yes_tx|1")) // Open the monitor m.Open() @@ -704,7 +1039,7 @@ func TestDeadlockOnClose(t *testing.T) { finishCh := make(chan int) go func() { count := 100 - for i := 0; i < count; i++ { + for range count { m.Close() m.Open() time.Sleep(20 * time.Millisecond) @@ -728,11 +1063,6 @@ func TestDeadlockOnClose(t *testing.T) { // It only calls the exported methods to see they work as intended. 
func TestSemiSyncMonitor(t *testing.T) { defer utils.EnsureNoLeaks(t) - initialVal := waitBetweenWrites - waitBetweenWrites = 250 * time.Millisecond - defer func() { - waitBetweenWrites = initialVal - }() db := fakesqldb.New(t) defer db.Close() params := db.ConnParams() @@ -741,18 +1071,39 @@ func TestSemiSyncMonitor(t *testing.T) { config := &tabletenv.TabletConfig{ DB: dbc, SemiSyncMonitor: tabletenv.SemiSyncMonitorConfig{ - Interval: 1 * time.Second, + Interval: 100 * time.Millisecond, }, } m := NewMonitor(config, exporter) + m.actionDelay = 10 * time.Millisecond + m.actionTimeout = 1 * time.Second defer func() { m.Close() waitUntilWritingStopped(t, m) }() db.SetNeverFail(true) - // Initially everything is unblocked. - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0")) + + // Set up a custom query handler that returns different results based on state. + handler := &statefulQueryHandler{ + db: db, + semiSyncStatsQuery: fmt.Sprintf(semiSyncStatsQuery, m.actionDelay.Milliseconds()), + blockedResult: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|1", + "Rpl_semi_sync_source_yes_tx|1"), + unblockedResult: sqltypes.MakeTestResult( + sqltypes.MakeTestFields("variable_name|variable_value", "varchar|varchar"), + "Rpl_semi_sync_source_wait_sessions|0", + "Rpl_semi_sync_source_yes_tx|0"), + } + handler.semisyncBlocked.Store(false) // Initially unblocked + db.Handler = handler + + // ExecuteFetchMulti will execute each statement separately + // Use patterns for both SET and INSERT since they can be called multiple times. + db.AddQuery("SET SESSION lock_wait_timeout=1", &sqltypes.Result{}) + db.AddQuery("INSERT INTO _vt.semisync_heartbeat (ts) VALUES (NOW())", &sqltypes.Result{}) // Open the monitor. 
m.Open() @@ -764,67 +1115,27 @@ func TestSemiSyncMonitor(t *testing.T) { err := m.WaitUntilSemiSyncUnblocked(ctx) require.NoError(t, err) - // Add a universal insert query pattern that would block until we make it unblock. - ch := make(chan int) - db.AddQueryPatternWithCallback("^INSERT INTO.*", sqltypes.MakeTestResult(nil), func(s string) { - <-ch - }) - // Now we set the monitor to be blocked. - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "1")) + // Test that WaitUntilSemiSyncUnblocked works correctly when the monitor starts returning unblocked. + // We don't need to test the blocking behavior in detail since TestWaitUntilSemiSyncUnblocked covers that. + // This test just verifies the basic black-box behavior. - // Start a go routine waiting for semi-sync being unblocked. - var waitFinished atomic.Bool - go func() { - err := m.WaitUntilSemiSyncUnblocked(context.Background()) - require.NoError(t, err) - waitFinished.Store(true) - }() + // Set to blocked state. + handler.semisyncBlocked.Store(true) - // Even if we wait a second, the wait shouldn't be over. - time.Sleep(1 * time.Second) - require.False(t, waitFinished.Load()) - - // If we unblock the semi-sync, then the wait should finish. - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0")) - close(ch) - require.Eventually(t, func() bool { - return waitFinished.Load() - }, 2*time.Second, 100*time.Millisecond) - require.False(t, m.AllWritesBlocked()) - - // Add a universal insert query pattern that would block until we make it unblock. - ch = make(chan int) - db.AddQueryPatternWithCallback("^INSERT INTO.*", sqltypes.MakeTestResult(nil), func(s string) { - <-ch - }) - // We block the semi-sync again. 
- db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "1")) - - // Start another go routine, also waiting for semi-sync being unblocked. - waitFinished.Store(false) + // Start a waiter. + var waitFinished atomic.Bool go func() { err := m.WaitUntilSemiSyncUnblocked(context.Background()) require.NoError(t, err) waitFinished.Store(true) }() - // Since the writes are now blocking, eventually all the writes should block. - require.Eventually(t, func() bool { - return m.AllWritesBlocked() - }, 10*time.Second, 100*time.Millisecond) - - // The wait should still not have ended. - require.False(t, waitFinished.Load()) - - // Now we unblock the writes and semi-sync. - close(ch) - db.AddQuery(semiSyncWaitSessionsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("variable_value", "varchar"), "0")) + // Now unblock and verify the wait completes. + handler.semisyncBlocked.Store(false) - // The wait should now finish. require.Eventually(t, func() bool { return waitFinished.Load() - }, 2*time.Second, 100*time.Millisecond) - require.False(t, m.AllWritesBlocked()) + }, 5*time.Second, 100*time.Millisecond) // Close the monitor. 
m.Close() @@ -832,7 +1143,7 @@ func TestSemiSyncMonitor(t *testing.T) { require.Eventually(t, func() bool { m.mu.Lock() defer m.mu.Unlock() - return !m.isWriting + return !m.isWriting.Load() }, 2*time.Second, 100*time.Millisecond) } @@ -852,7 +1163,7 @@ func waitUntilWritingStopped(t *testing.T, m *Monitor) { t.Fatalf("Timed out waiting for writing to stop: %v", ctx.Err()) case <-tick.C: m.mu.Lock() - if !m.isWriting { + if !m.isWriting.Load() { m.mu.Unlock() return } diff --git a/go/vt/vttablet/tabletmanager/shard_sync.go b/go/vt/vttablet/tabletmanager/shard_sync.go index 97397ef2a34..546f60c96fe 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync.go +++ b/go/vt/vttablet/tabletmanager/shard_sync.go @@ -242,7 +242,7 @@ func (tm *TabletManager) endPrimaryTerm(ctx context.Context, primaryAlias *topod log.Infof("Active reparents are enabled; converting MySQL to replica.") demotePrimaryCtx, cancelDemotePrimary := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancelDemotePrimary() - if _, err := tm.demotePrimary(demotePrimaryCtx, false /* revertPartialFailure */); err != nil { + if _, err := tm.demotePrimary(demotePrimaryCtx, false /* revertPartialFailure */, true /* force */); err != nil { return vterrors.Wrap(err, "failed to demote primary") } setPrimaryCtx, cancelSetPrimary := context.WithTimeout(ctx, topo.RemoteOperationTimeout) diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index a3c84770ec3..11ce7731183 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -89,13 +89,14 @@ const ( var ( // The following flags initialize the tablet record. 
- tabletHostname string - initKeyspace string - initShard string - initTabletType string - initDbNameOverride string - skipBuildInfoTags = "/.*/" - initTags flagutil.StringMapValue + tabletHostname string + initKeyspace string + initShard string + initTabletType string + initTabletTypeLookup bool + initDbNameOverride string + skipBuildInfoTags = "/.*/" + initTags flagutil.StringMapValue initTimeout = 1 * time.Minute mysqlShutdownTimeout = mysqlctl.DefaultShutdownTimeout @@ -105,7 +106,8 @@ func registerInitFlags(fs *pflag.FlagSet) { utils.SetFlagStringVar(fs, &tabletHostname, "tablet-hostname", tabletHostname, "if not empty, this hostname will be assumed instead of trying to resolve it") utils.SetFlagStringVar(fs, &initKeyspace, "init-keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet") utils.SetFlagStringVar(fs, &initShard, "init-shard", initShard, "(init parameter) shard to use for this tablet") - utils.SetFlagStringVar(fs, &initTabletType, "init-tablet-type", initTabletType, "(init parameter) tablet type to use for this tablet. Valid values are: PRIMARY, REPLICA, SPARE, and RDONLY. The default is REPLICA.") + utils.SetFlagStringVar(fs, &initTabletType, "init-tablet-type", initTabletType, "(init parameter) tablet type to use for this tablet. Valid values are: REPLICA, RDONLY, and SPARE. The default is REPLICA.") + fs.BoolVar(&initTabletTypeLookup, "init-tablet-type-lookup", initTabletTypeLookup, "(Experimental, init parameter) if enabled, uses tablet alias to look up the tablet type from the existing topology record on restart and use that instead of init-tablet-type. This allows tablets to maintain their changed roles (e.g., RDONLY/DRAINED) across restarts. If disabled or if no topology record exists, init-tablet-type will be used.") utils.SetFlagStringVar(fs, &initDbNameOverride, "init-db-name-override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet. 
Without this flag, the db name defaults to vt_") utils.SetFlagStringVar(fs, &skipBuildInfoTags, "vttablet-skip-buildinfo-tags", skipBuildInfoTags, "comma-separated list of buildinfo tags to skip from merging with --init-tags. each tag is either an exact match or a regular expression of the form '/regexp/'.") utils.SetFlagVar(fs, &initTags, "init-tags", "(init parameter) comma separated list of key:value pairs used to tag the tablet") @@ -372,6 +374,42 @@ func (tm *TabletManager) Start(tablet *topodatapb.Tablet, config *tabletenv.Tabl tm.DBConfigs.DBName = topoproto.TabletDbName(tablet) tm.tabletAlias = tablet.Alias tm.tmc = tmclient.NewTabletManagerClient() + + // Check if there's an existing tablet record in topology and use it if flag is enabled + if initTabletTypeLookup { + ctx, cancel := context.WithTimeout(tm.BatchCtx, initTimeout) + defer cancel() + existingTablet, err := tm.TopoServer.GetTablet(ctx, tablet.Alias) + if err != nil && !topo.IsErrType(err, topo.NoNode) { + // Error other than "node doesn't exist" - return it + return vterrors.Wrap(err, "--init-tablet-type-lookup is enabled but failed to get existing tablet record from topology, unable to determine tablet type during startup") + } + + // If we found an existing tablet record, determine which type to use + switch { + case err != nil: + // No existing tablet record found, use init-tablet-type + log.Infof("No existing tablet record found, using init-tablet-type: %v", tablet.Type) + case existingTablet.Type == topodatapb.TabletType_PRIMARY: + // Don't set to PRIMARY yet - let checkPrimaryShip() validate and decide + // checkPrimaryShip() has the logic to verify shard records and determine if this tablet should really be PRIMARY + log.Infof("Found existing tablet record with PRIMARY type, setting to REPLICA and allowing checkPrimaryShip() to validate") + tablet.Type = topodatapb.TabletType_REPLICA + case existingTablet.Type == topodatapb.TabletType_BACKUP || existingTablet.Type == 
topodatapb.TabletType_RESTORE: + // Skip transient operational types (BACKUP, RESTORE) + // These are temporary states that should not be preserved across restarts + log.Infof("Found existing tablet record with transient type %v, using init-tablet-type %v instead", + existingTablet.Type, tablet.Type) + default: + // Safe to restore the type for non-PRIMARY, non-transient types + log.Infof("Found existing tablet record with --init-tablet-type-lookup enabled, using tablet type %v from topology instead of init-tablet-type %v", + existingTablet.Type, tablet.Type) + tablet.Type = existingTablet.Type + } + } else { + log.Infof("Using init-tablet-type %v", tablet.Type) + } + tm.tmState = newTMState(tm, tablet) tm.actionSema = semaphore.NewWeighted(1) tm._waitForGrantsComplete = make(chan struct{}) diff --git a/go/vt/vttablet/tabletmanager/tm_init_test.go b/go/vt/vttablet/tabletmanager/tm_init_test.go index 3d8b9fd132f..3758ab16fbb 100644 --- a/go/vt/vttablet/tabletmanager/tm_init_test.go +++ b/go/vt/vttablet/tabletmanager/tm_init_test.go @@ -961,3 +961,280 @@ func grantAllPrivilegesToUser(t *testing.T, connParams mysql.ConnParams, testUse require.NoError(t, err) conn.Close() } + +func TestInitTabletTypeLookup_PreservesTabletTypes(t *testing.T) { + defer func(saved bool) { initTabletTypeLookup = saved }(initTabletTypeLookup) + defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) + rebuildKeyspaceRetryInterval = 10 * time.Millisecond + + tests := []struct { + name string + preservedType topodatapb.TabletType + }{ + { + name: "RDONLY", + preservedType: topodatapb.TabletType_RDONLY, + }, + { + name: "DRAINED", + preservedType: topodatapb.TabletType_DRAINED, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + cell := "cell1" + ts := memorytopo.NewServer(ctx, cell) + alias := &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 1, + } + + // 1. 
Initialize tablet as REPLICA (normal startup) with flag disabled + initTabletTypeLookup = false + tm := newTestTM(t, ts, int(alias.Uid), "ks", "0", nil) + tablet := tm.Tablet() + ensureSrvKeyspace(t, ctx, ts, cell, "ks") + ti, err := ts.GetTablet(ctx, alias) + require.NoError(t, err) + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) + tm.Stop() + + // 2. Simulate operator changing tablet type in topology + _, err = ts.UpdateTabletFields(ctx, alias, func(t *topodatapb.Tablet) error { + t.Type = tt.preservedType + return nil + }) + require.NoError(t, err) + + // 3. Restart with flag enabled - should preserve the tablet type + initTabletTypeLookup = true + err = tm.Start(tablet, nil) + require.NoError(t, err) + defer tm.Stop() + ti, err = ts.GetTablet(ctx, alias) + require.NoError(t, err) + assert.Equal(t, tt.preservedType, ti.Type) + }) + } +} + +func TestInitTabletTypeLookup_PreservesPrimaryWithTermTime(t *testing.T) { + defer func(saved bool) { initTabletTypeLookup = saved }(initTabletTypeLookup) + defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) + rebuildKeyspaceRetryInterval = 10 * time.Millisecond + + ctx := t.Context() + cell := "cell1" + ts := memorytopo.NewServer(ctx, cell) + alias := &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 1, + } + + // 1. Initialize tablet as REPLICA with flag disabled + initTabletTypeLookup = false + tm := newTestTM(t, ts, int(alias.Uid), "ks", "0", nil) + tablet := tm.Tablet() + ensureSrvKeyspace(t, ctx, ts, cell, "ks") + ti, err := ts.GetTablet(ctx, alias) + require.NoError(t, err) + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) + tm.Stop() + + // 2. Simulate promotion to PRIMARY with a specific term start time + now := time.Now() + _, err = ts.UpdateTabletFields(ctx, alias, func(t *topodatapb.Tablet) error { + t.Type = topodatapb.TabletType_PRIMARY + t.PrimaryTermStartTime = protoutil.TimeToProto(now) + return nil + }) + require.NoError(t, err) + + // 3. 
Update shard's PrimaryAlias to point to this tablet so checkPrimaryShip will promote it + _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { + si.PrimaryAlias = alias + si.PrimaryTermStartTime = protoutil.TimeToProto(now) + return nil + }) + require.NoError(t, err) + + // 4. Restart with flag enabled - should set to REPLICA initially, then checkPrimaryShip promotes to PRIMARY + initTabletTypeLookup = true + err = tm.Start(tablet, nil) + require.NoError(t, err) + defer tm.Stop() + ti, err = ts.GetTablet(ctx, alias) + require.NoError(t, err) + // Should be promoted to PRIMARY by checkPrimaryShip and preserve the term start time + assert.Equal(t, topodatapb.TabletType_PRIMARY, ti.Type) + assert.Equal(t, now.Unix(), ti.GetPrimaryTermStartTime().Unix()) +} + +func TestInitTabletTypeLookup_FallbackWhenNoTopoRecord(t *testing.T) { + defer func(saved bool) { initTabletTypeLookup = saved }(initTabletTypeLookup) + defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) + rebuildKeyspaceRetryInterval = 10 * time.Millisecond + + ctx := t.Context() + cell := "cell1" + ts := memorytopo.NewServer(ctx, cell) + alias := &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 1, + } + + // Start new tablet with flag enabled but no existing topo record + initTabletTypeLookup = true + tm := newTestTM(t, ts, int(alias.Uid), "ks", "0", nil) + defer tm.Stop() + ensureSrvKeyspace(t, ctx, ts, cell, "ks") + ti, err := ts.GetTablet(ctx, alias) + require.NoError(t, err) + // Should use initTabletType (REPLICA) + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) +} + +func TestInitTabletTypeLookup_DisabledUsesInitType(t *testing.T) { + defer func(saved bool) { initTabletTypeLookup = saved }(initTabletTypeLookup) + defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) + rebuildKeyspaceRetryInterval = 10 * time.Millisecond + + ctx := t.Context() + cell := "cell1" + ts := 
memorytopo.NewServer(ctx, cell) + alias := &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 1, + } + + // 1. Initialize tablet as REPLICA with flag disabled + initTabletTypeLookup = false + tm := newTestTM(t, ts, int(alias.Uid), "ks", "0", nil) + tablet := tm.Tablet() + ensureSrvKeyspace(t, ctx, ts, cell, "ks") + ti, err := ts.GetTablet(ctx, alias) + require.NoError(t, err) + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) + tm.Stop() + + // 2. Simulate operator changing tablet to RDONLY in topology + _, err = ts.UpdateTabletFields(ctx, alias, func(t *topodatapb.Tablet) error { + t.Type = topodatapb.TabletType_RDONLY + return nil + }) + require.NoError(t, err) + + // 3. Restart with flag still disabled - should use initTabletType (REPLICA) + initTabletTypeLookup = false + err = tm.Start(tablet, nil) + require.NoError(t, err) + defer tm.Stop() + ti, err = ts.GetTablet(ctx, alias) + require.NoError(t, err) + // Topo record should be overwritten with REPLICA + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) +} + +func TestInitTabletTypeLookup_SkipsTransientTypes(t *testing.T) { + defer func(saved bool) { initTabletTypeLookup = saved }(initTabletTypeLookup) + defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) + rebuildKeyspaceRetryInterval = 10 * time.Millisecond + + tests := []struct { + name string + transientType topodatapb.TabletType + }{ + { + name: "BACKUP", + transientType: topodatapb.TabletType_BACKUP, + }, + { + name: "RESTORE", + transientType: topodatapb.TabletType_RESTORE, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + cell := "cell1" + ts := memorytopo.NewServer(ctx, cell) + alias := &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 1, + } + + // 1. 
Initialize tablet as REPLICA with flag disabled + initTabletTypeLookup = false + tm := newTestTM(t, ts, int(alias.Uid), "ks", "0", nil) + tablet := tm.Tablet() + ensureSrvKeyspace(t, ctx, ts, cell, "ks") + ti, err := ts.GetTablet(ctx, alias) + require.NoError(t, err) + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) + tm.Stop() + + // 2. Simulate crash during backup/restore (tablet type is transient in topo) + _, err = ts.UpdateTabletFields(ctx, alias, func(t *topodatapb.Tablet) error { + t.Type = tt.transientType + return nil + }) + require.NoError(t, err) + + // 3. Restart with flag enabled - should skip transient type and use initTabletType + initTabletTypeLookup = true + err = tm.Start(tablet, nil) + require.NoError(t, err) + defer tm.Stop() + ti, err = ts.GetTablet(ctx, alias) + require.NoError(t, err) + // Should use initTabletType (REPLICA), not preserve transient type + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) + }) + } +} + +func TestInitTabletTypeLookup_InteractionWithCheckPrimaryShip(t *testing.T) { + defer func(saved bool) { initTabletTypeLookup = saved }(initTabletTypeLookup) + defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) + rebuildKeyspaceRetryInterval = 10 * time.Millisecond + + ctx := t.Context() + cell := "cell1" + ts := memorytopo.NewServer(ctx, cell) + alias := &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 1, + } + + // 1. Initialize tablet as REPLICA with flag disabled + initTabletTypeLookup = false + tm := newTestTM(t, ts, int(alias.Uid), "ks", "0", nil) + tablet := tm.Tablet() + ensureSrvKeyspace(t, ctx, ts, cell, "ks") + ti, err := ts.GetTablet(ctx, alias) + require.NoError(t, err) + assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) + tm.Stop() + + // 2. 
Set shard's PrimaryAlias to this tablet + now := time.Now() + _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { + si.PrimaryAlias = alias + si.PrimaryTermStartTime = protoutil.TimeToProto(now) + return nil + }) + require.NoError(t, err) + + // 3. Restart with flag enabled - checkPrimaryShip should still promote to PRIMARY + initTabletTypeLookup = true + err = tm.Start(tablet, nil) + require.NoError(t, err) + defer tm.Stop() + ti, err = ts.GetTablet(ctx, alias) + require.NoError(t, err) + // Should be PRIMARY due to checkPrimaryShip logic + assert.Equal(t, topodatapb.TabletType_PRIMARY, ti.Type) +} diff --git a/go/vt/vttablet/tabletserver/querythrottler/config.go b/go/vt/vttablet/tabletserver/querythrottler/config.go index a424184abe5..11ed12da846 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/config.go +++ b/go/vt/vttablet/tabletserver/querythrottler/config.go @@ -16,7 +16,10 @@ limitations under the License. package querythrottler -import "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler/registry" +import ( + querythrottlerpb "vitess.io/vitess/go/vt/proto/querythrottler" + "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler/registry" +) // Compile-time interface compliance check var _ registry.StrategyConfig = (*Config)(nil) @@ -32,11 +35,30 @@ type Config struct { // throttling decision is logged for observability. DryRun bool `json:"dry_run"` - // Strategy selects which throttling strategy should be used. - Strategy registry.ThrottlingStrategy `json:"strategy"` + // StrategyName name of the strategy to use for throttling. 
+ StrategyName registry.ThrottlingStrategy `json:"strategy"` } -// GetStrategy implements registry.StrategyConfig interface -func (c Config) GetStrategy() registry.ThrottlingStrategy { - return c.Strategy +// GetStrategyName implements registry.StrategyConfig interface +func (c Config) GetStrategyName() registry.ThrottlingStrategy { + return c.StrategyName +} + +// ConfigFromProto converts a protobuf QueryThrottler configuration into its internal Config representation. +// It processes the incoming configuration and creates a complete Config struct with all necessary mappings for tablet rules, statement rules, and metric rules. +func ConfigFromProto(queryThrottlerConfig *querythrottlerpb.Config) Config { + return Config{ + Enabled: queryThrottlerConfig.GetEnabled(), + DryRun: queryThrottlerConfig.GetDryRun(), + StrategyName: ThrottlingStrategyFromProto(queryThrottlerConfig.GetStrategy()), + } +} + +func ThrottlingStrategyFromProto(strategy querythrottlerpb.ThrottlingStrategy) registry.ThrottlingStrategy { + switch strategy { + case querythrottlerpb.ThrottlingStrategy_TABLET_THROTTLER: + return registry.ThrottlingStrategyTabletThrottler + default: + return registry.ThrottlingStrategyUnknown + } } diff --git a/go/vt/vttablet/tabletserver/querythrottler/config_loader_interface.go b/go/vt/vttablet/tabletserver/querythrottler/config_loader_interface.go deleted file mode 100644 index ce04ffe7d9c..00000000000 --- a/go/vt/vttablet/tabletserver/querythrottler/config_loader_interface.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2025 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package querythrottler - -import "context" - -type ConfigLoader interface { - // Load returns the latest throttler config (may come from file, topo, etc.) - Load(ctx context.Context) (Config, error) -} diff --git a/go/vt/vttablet/tabletserver/querythrottler/config_test.go b/go/vt/vttablet/tabletserver/querythrottler/config_test.go new file mode 100644 index 00000000000..d9ca175d4b1 --- /dev/null +++ b/go/vt/vttablet/tabletserver/querythrottler/config_test.go @@ -0,0 +1,174 @@ +/* +Copyright 2025 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package querythrottler + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/proto/querythrottler" + "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler/registry" +) + +// Test_ConfigFromProto tests conversion from protobuf to internal Config struct. 
+func Test_ConfigFromProto(t *testing.T) { + tests := []struct { + name string + protoConfig *querythrottler.Config + expectedConfig Config + }{ + { + name: "EnabledWithTabletStrategy", + protoConfig: &querythrottler.Config{ + Enabled: true, + DryRun: false, + Strategy: querythrottler.ThrottlingStrategy_TABLET_THROTTLER, + TabletStrategyConfig: &querythrottler.TabletStrategyConfig{ + TabletRules: make(map[string]*querythrottler.StatementRuleSet), + }, + }, + expectedConfig: Config{ + Enabled: true, + DryRun: false, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + }, + }, + { + name: "DisabledDryRun", + protoConfig: &querythrottler.Config{ + Enabled: false, + DryRun: true, + Strategy: querythrottler.ThrottlingStrategy_TABLET_THROTTLER, + TabletStrategyConfig: &querythrottler.TabletStrategyConfig{ + TabletRules: make(map[string]*querythrottler.StatementRuleSet), + }, + }, + expectedConfig: Config{ + Enabled: false, + DryRun: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + }, + }, + { + name: "UnknownStrategy", + protoConfig: &querythrottler.Config{ + Enabled: true, + Strategy: querythrottler.ThrottlingStrategy(-1), // Invalid strategy + TabletStrategyConfig: &querythrottler.TabletStrategyConfig{ + TabletRules: make(map[string]*querythrottler.StatementRuleSet), + }, + }, + expectedConfig: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyUnknown, + }, + }, + { + name: "NilTabletStrategyConfig", + protoConfig: &querythrottler.Config{ + Enabled: true, + Strategy: querythrottler.ThrottlingStrategy_TABLET_THROTTLER, + TabletStrategyConfig: nil, + }, + expectedConfig: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + }, + }, + { + name: "WithMetricThresholds", + protoConfig: &querythrottler.Config{ + Enabled: true, + Strategy: querythrottler.ThrottlingStrategy_TABLET_THROTTLER, + TabletStrategyConfig: &querythrottler.TabletStrategyConfig{ + TabletRules: 
map[string]*querythrottler.StatementRuleSet{ + "PRIMARY": { + StatementRules: map[string]*querythrottler.MetricRuleSet{ + "SELECT": { + MetricRules: map[string]*querythrottler.MetricRule{ + "lag": { + Thresholds: []*querythrottler.ThrottleThreshold{ + {Above: 100, Throttle: 50}, + {Above: 200, Throttle: 75}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedConfig: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + }, + }, + { + name: "MultipleTabletTypes", + protoConfig: &querythrottler.Config{ + Enabled: true, + Strategy: querythrottler.ThrottlingStrategy_TABLET_THROTTLER, + TabletStrategyConfig: &querythrottler.TabletStrategyConfig{ + TabletRules: map[string]*querythrottler.StatementRuleSet{ + "PRIMARY": { + StatementRules: map[string]*querythrottler.MetricRuleSet{ + "SELECT": { + MetricRules: map[string]*querythrottler.MetricRule{ + "lag": { + Thresholds: []*querythrottler.ThrottleThreshold{ + {Above: 100, Throttle: 50}, + {Above: 200, Throttle: 75}, + }, + }, + }, + }, + }, + }, + "REPLICA": { + StatementRules: map[string]*querythrottler.MetricRuleSet{ + "SELECT": { + MetricRules: map[string]*querythrottler.MetricRule{ + "lag": { + Thresholds: []*querythrottler.ThrottleThreshold{ + {Above: 100, Throttle: 50}, + {Above: 200, Throttle: 75}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedConfig: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := ConfigFromProto(tt.protoConfig) + + require.Equal(t, tt.expectedConfig, cfg, "Config should match expected value") + }) + } +} diff --git a/go/vt/vttablet/tabletserver/querythrottler/file_based_config_loader.go b/go/vt/vttablet/tabletserver/querythrottler/file_based_config_loader.go deleted file mode 100644 index 68b78149b5c..00000000000 --- a/go/vt/vttablet/tabletserver/querythrottler/file_based_config_loader.go +++ /dev/null @@ -1,69 +0,0 @@ -/* 
-Copyright 2025 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package querythrottler - -import ( - "context" - "encoding/json" - "os" -) - -const defaultConfigPath = "/config/throttler-config.json" - -var _ ConfigLoader = (*FileBasedConfigLoader)(nil) - -// FileBasedConfigLoader implements ConfigLoader by reading configuration from a JSON file. -type FileBasedConfigLoader struct { - configPath string - readFile func(string) ([]byte, error) - unmarshal func([]byte, interface{}) error -} - -// NewFileBasedConfigLoader creates a new instance of FileBasedConfigLoader. -// It uses the standard config path "/config/throttler-config.json" and standard os.ReadFile and json.Unmarshal functions. -func NewFileBasedConfigLoader() *FileBasedConfigLoader { - return &FileBasedConfigLoader{ - configPath: defaultConfigPath, - readFile: os.ReadFile, - unmarshal: json.Unmarshal, - } -} - -// NewFileBasedConfigLoaderWithDeps creates a new instance with custom dependencies for testing. -// This allows injection of mock functions without global state modification. -func NewFileBasedConfigLoaderWithDeps(configPath string, readFile func(string) ([]byte, error), unmarshal func([]byte, interface{}) error) *FileBasedConfigLoader { - return &FileBasedConfigLoader{ - configPath: configPath, - readFile: readFile, - unmarshal: unmarshal, - } -} - -// Load reads the configuration from the configured file path. 
-func (f *FileBasedConfigLoader) Load(ctx context.Context) (Config, error) { - data, err := f.readFile(f.configPath) - if err != nil { - return Config{}, err - } - - var cfg Config - if unMarshalErr := f.unmarshal(data, &cfg); unMarshalErr != nil { - return Config{}, unMarshalErr - } - - return cfg, nil -} diff --git a/go/vt/vttablet/tabletserver/querythrottler/file_based_config_loader_test.go b/go/vt/vttablet/tabletserver/querythrottler/file_based_config_loader_test.go deleted file mode 100644 index 2c646e0569e..00000000000 --- a/go/vt/vttablet/tabletserver/querythrottler/file_based_config_loader_test.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2025 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package querythrottler - -import ( - "context" - "encoding/json" - "errors" - "testing" - - "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler/registry" - - "github.com/stretchr/testify/require" -) - -func TestNewFileBasedConfigLoader(t *testing.T) { - loader := NewFileBasedConfigLoader() - require.NotNil(t, loader) - require.IsType(t, &FileBasedConfigLoader{}, loader) - require.Equal(t, defaultConfigPath, loader.configPath) -} - -func TestFileBasedConfigLoader_Load(t *testing.T) { - tests := []struct { - name string - configPath string - mockReadFile func(filename string) ([]byte, error) - mockJsonUnmarshal func(data []byte, v interface{}) error - expectedConfig Config - expectedError string - }{ - { - name: "successful config load with minimal config", - configPath: "/config/throttler-config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/config/throttler-config.json", filename) - return []byte(`{"enabled": true, "strategy": "TabletThrottler"}`), nil - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - }, - expectedConfig: Config{ - Enabled: true, - Strategy: registry.ThrottlingStrategyTabletThrottler, - }, - }, - { - name: "successful config load with disabled throttler", - configPath: "/config/throttler-config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/config/throttler-config.json", filename) - return []byte(`{"enabled": false, "strategy": "TabletThrottler"}`), nil - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - }, - expectedConfig: Config{ - Enabled: false, - Strategy: registry.ThrottlingStrategyTabletThrottler, - }, - }, - { - name: "file read error - file not found", - configPath: "/nonexistent/config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/nonexistent/config.json", filename) - return nil, errors.New("no such file or 
directory") - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - }, - expectedConfig: Config{}, - expectedError: "no such file or directory", - }, - { - name: "successful config load with dry run as enabled", - configPath: "/config/throttler-config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/config/throttler-config.json", filename) - return []byte(`{"enabled": true, "strategy": "TabletThrottler", "dry_run": true}`), nil - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - }, - expectedConfig: Config{ - Enabled: true, - Strategy: registry.ThrottlingStrategyTabletThrottler, - DryRun: true, - }, - }, - { - name: "file read error - permission denied", - configPath: "/config/throttler-config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/config/throttler-config.json", filename) - return nil, errors.New("permission denied") - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - }, - expectedConfig: Config{}, - expectedError: "permission denied", - }, - { - name: "json unmarshal error - invalid json", - configPath: "/config/throttler-config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/config/throttler-config.json", filename) - return []byte(`{"enabled": true`), nil - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - }, - expectedConfig: Config{}, - expectedError: "unexpected end of JSON input", - }, - { - name: "json unmarshal error - invalid field type", - configPath: "/config/throttler-config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/config/throttler-config.json", filename) - return []byte(`{"enabled": "not_a_boolean", "strategy": "TabletThrottler"}`), nil - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - 
return json.Unmarshal(data, v) - }, - expectedConfig: Config{}, - expectedError: "json: cannot unmarshal string into Go struct field Config.enabled of type bool", - }, - { - name: "empty file - should unmarshal to zero value config", - configPath: "/config/throttler-config.json", - mockReadFile: func(filename string) ([]byte, error) { - require.Equal(t, "/config/throttler-config.json", filename) - return []byte(`{}`), nil - }, - mockJsonUnmarshal: func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - }, - expectedConfig: Config{ - Enabled: false, - Strategy: "", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create loader with injected dependencies - loader := NewFileBasedConfigLoaderWithDeps(tt.configPath, tt.mockReadFile, tt.mockJsonUnmarshal) - - // Test - config, err := loader.Load(context.Background()) - - // Assert - if tt.expectedError != "" { - require.Error(t, err) - require.EqualError(t, err, tt.expectedError) - require.Equal(t, tt.expectedConfig, config) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedConfig, config) - } - }) - } -} - -func TestFileBasedConfigLoader_Load_ConfigPath(t *testing.T) { - // Test that the production loader uses the default config path - var capturedPath string - - mockReadFile := func(filename string) ([]byte, error) { - capturedPath = filename - return []byte(`{"enabled": true, "strategy": "TabletThrottler"}`), nil - } - - mockJsonUnmarshal := func(data []byte, v interface{}) error { - return json.Unmarshal(data, v) - } - - // Test with production constructor (should use default path) - loader := NewFileBasedConfigLoaderWithDeps(defaultConfigPath, mockReadFile, mockJsonUnmarshal) - _, err := loader.Load(context.Background()) - - require.NoError(t, err) - require.Equal(t, "/config/throttler-config.json", capturedPath) -} - -func TestFileBasedConfigLoader_ImplementsConfigLoader(t *testing.T) { - // Verify that FileBasedConfigLoader implements 
ConfigLoader interface - var _ ConfigLoader = (*FileBasedConfigLoader)(nil) - - // This should compile without issues, proving interface compliance - loader := NewFileBasedConfigLoader() - require.NotNil(t, loader) -} - -func TestNewFileBasedConfigLoaderWithDeps(t *testing.T) { - configPath := "/test/config.json" - mockReadFile := func(string) ([]byte, error) { return nil, nil } - mockUnmarshal := func([]byte, interface{}) error { return nil } - - loader := NewFileBasedConfigLoaderWithDeps(configPath, mockReadFile, mockUnmarshal) - - require.NotNil(t, loader) - require.Equal(t, configPath, loader.configPath) - // Note: We can't directly test function equality, but the constructor should set them -} - -func TestFileBasedConfigLoader_UsesDefaultPath(t *testing.T) { - // Test that the production constructor uses the default path - loader := NewFileBasedConfigLoader() - require.Equal(t, "/config/throttler-config.json", loader.configPath) -} diff --git a/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go b/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go index df78990a565..a0a184db821 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go +++ b/go/vt/vttablet/tabletserver/querythrottler/query_throttler.go @@ -18,11 +18,14 @@ package querythrottler import ( "context" + "errors" "strconv" "sync" - "time" + "sync/atomic" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" @@ -42,36 +45,41 @@ const ( ) type QueryThrottler struct { - ctx context.Context + ctx context.Context + cancelWatchContext context.CancelFunc + throttleClient *throttle.Client tabletConfig *tabletenv.TabletConfig - mu sync.RWMutex + + keyspace string + cell string + srvTopoServer srvtopo.Server + + mu sync.RWMutex + watchStarted atomic.Bool + // cfg holds the current configuration for the throttler. 
cfg Config - // cfgLoader is responsible for loading the configuration. - cfgLoader ConfigLoader - // strategy is the current throttling strategy handler. - strategy registry.ThrottlingStrategyHandler + // strategyHandlerInstance is the current throttling strategy handler instance + strategyHandlerInstance registry.ThrottlingStrategyHandler } // NewQueryThrottler creates a new query throttler. -func NewQueryThrottler(ctx context.Context, throttler *throttle.Throttler, cfgLoader ConfigLoader, env tabletenv.Env) *QueryThrottler { +func NewQueryThrottler(ctx context.Context, throttler *throttle.Throttler, env tabletenv.Env, alias *topodatapb.TabletAlias, srvTopoServer srvtopo.Server) *QueryThrottler { client := throttle.NewBackgroundClient(throttler, throttlerapp.QueryThrottlerName, base.UndefinedScope) qt := &QueryThrottler{ - ctx: ctx, - throttleClient: client, - tabletConfig: env.Config(), - cfg: Config{}, - cfgLoader: cfgLoader, - strategy: ®istry.NoOpStrategy{}, // default strategy until config is loaded + ctx: ctx, + throttleClient: client, + tabletConfig: env.Config(), + cell: alias.GetCell(), + srvTopoServer: srvTopoServer, + cfg: Config{}, + strategyHandlerInstance: ®istry.NoOpStrategy{}, // default strategy until config is loaded } // Start the initial strategy - qt.strategy.Start() - - // starting the loop which will be responsible for refreshing the config. 
- qt.startConfigRefreshLoop() + qt.strategyHandlerInstance.Start() return qt } @@ -82,12 +90,43 @@ func (qt *QueryThrottler) Shutdown() { qt.mu.Lock() defer qt.mu.Unlock() + // Cancel the watch context to stop the watch goroutine + if qt.cancelWatchContext != nil { + qt.cancelWatchContext() + } + + // Reset the watch started flag to allow restarting the watch if needed + qt.watchStarted.Store(false) + // Stop the current strategy to clean up any background processes - if qt.strategy != nil { - qt.strategy.Stop() + if qt.strategyHandlerInstance != nil { + qt.strategyHandlerInstance.Stop() } } +// InitDBConfig initializes the keyspace for the config watch and loads the initial configuration. +// This method is called by TabletServer during the tablet initialization sequence (see +// go/vt/vttablet/tabletserver/tabletserver.go:InitDBConfig), which happens when: +// - A tablet first starts up +// - A tablet restarts after a crash or upgrade +// - A new tablet node is added to the cluster +// +// Why initial config loading is critical: +// When a tablet starts (or restarts), it needs to immediately have the correct throttling +// configuration from the topology server. Without this, the tablet would run with the default +// NoOp strategy until the next configuration update is pushed to the topology, which could +// result in: +// - Unthrottled queries overwhelming a recovering tablet +// - Inconsistent throttling behavior across the fleet during rolling restarts +// - Missing critical throttling rules during high-load periods +func (qt *QueryThrottler) InitDBConfig(keyspace string) { + qt.keyspace = keyspace + log.Infof("QueryThrottler: initialized with keyspace=%s", keyspace) + + // Start the topo server watch post the keyspace is set. + qt.startSrvKeyspaceWatch() +} + // Throttle checks if the tablet is under heavy load // and enforces throttling by rejecting the incoming request if necessary. 
// Note: This method performs lock-free reads of config and strategy for optimal performance. @@ -108,7 +147,7 @@ func (qt *QueryThrottler) Throttle(ctx context.Context, tabletType topodatapb.Ta } // Evaluate the throttling decision - decision := qt.strategy.Evaluate(ctx, tabletType, parsedQuery, transactionID, attrs) + decision := qt.strategyHandlerInstance.Evaluate(ctx, tabletType, parsedQuery, transactionID, attrs) // If no throttling is needed, allow the query if !decision.Throttle { @@ -125,6 +164,59 @@ func (qt *QueryThrottler) Throttle(ctx context.Context, tabletType topodatapb.Ta return vterrors.New(vtrpcpb.Code_RESOURCE_EXHAUSTED, decision.Message) } +// startSrvKeyspaceWatch starts watching the SrvKeyspace for event-driven config updates. +// This method performs two critical operations: +// 1. Initial Configuration Load (with retry): +// Fetches the current SrvKeyspace configuration from the topology server using GetSrvKeyspace. +// This is essential for tablets starting up or restarting, as they need immediate access to +// throttling rules without waiting for a configuration change event. +// 2. Watch Establishment: +// Starts a background goroutine that watches for future SrvKeyspace changes using WatchSrvKeyspace. +// This ensures the tablet receives real-time configuration updates throughout its lifecycle. +// +// Thread Safety: This method uses the watchStarted atomic flag to ensure it only runs once, even if called +// concurrently. Only the first caller will actually start the watch; subsequent calls return early. 
+func (qt *QueryThrottler) startSrvKeyspaceWatch() { + // Pre-flight validation: ensure required fields are set + if qt.srvTopoServer == nil || qt.keyspace == "" { + log.Errorf("QueryThrottler: cannot start SrvKeyspace watch, srvTopoServer=%v, keyspace=%s", qt.srvTopoServer != nil, qt.keyspace) + return + } + + // Phase 1: Load initial configuration with retry logic + // This ensures tablets have the correct throttling config immediately after startup/restart. + // TODO(Siddharth) add retry for this initial load + srvKS, err := qt.srvTopoServer.GetSrvKeyspace(qt.ctx, qt.cell, qt.keyspace) + if err != nil { + log.Warningf("QueryThrottler: failed to load initial config for keyspace=%s (GetSrvKeyspace): %v", qt.keyspace, err) + } + if srvKS == nil { + log.Warningf("QueryThrottler: srv keyspace fetched is nil for keyspace=%s ", qt.keyspace) + } + qt.HandleConfigUpdate(srvKS, nil) + + // Phase 2: Start the watch for future configuration updates + // Always start the watch, even if initial load failed, to enable recovery when config becomes available + + // Only start the watch once (protected by atomic flag) + if !qt.watchStarted.CompareAndSwap(false, true) { + log.Infof("QueryThrottler: SrvKeyspace watch already started for keyspace=%s", qt.keyspace) + return + } + watchCtx, cancel := context.WithCancel(qt.ctx) + qt.cancelWatchContext = cancel + + go func() { + // WatchSrvKeyspace will: + // 1. Provide the current value immediately (may duplicate our GetSrvKeyspace result, but deduped) + // 2. Stream future configuration updates via the callback + // 3. Automatically retry on transient errors (handled by resilient watcher) + qt.srvTopoServer.WatchSrvKeyspace(watchCtx, qt.cell, qt.keyspace, qt.HandleConfigUpdate) + }() + + log.Infof("QueryThrottler: started event-driven watch for SrvKeyspace keyspace=%s cell=%s", qt.keyspace, qt.cell) +} + // extractWorkloadName extracts the workload name from ExecuteOptions. // If no workload name is provided, returns a default value. 
func extractWorkloadName(options *querypb.ExecuteOptions) string { @@ -162,6 +254,88 @@ func extractPriority(options *querypb.ExecuteOptions) int { return optionsPriority } +// HandleConfigUpdate is the callback invoked when the SrvKeyspace topology changes. +// It loads the updated configuration from the topo server and updates the QueryThrottler's +// strategy and configuration accordingly. +// +// IMPORTANT: This method is designed ONLY to be called as a callback from srvtopo.WatchSrvKeyspace. +// It relies on the resilient watcher's auto-retry behavior (see go/vt/srvtopo/watch.go) and should +// not be called directly from other contexts. +// +// Return value contract (required by WatchSrvKeyspace): +// - true: Continue watching (resilient watcher will auto-retry on transient errors) +// - false: Stop watching permanently (for fatal errors like NoNode, context canceled, or Interrupted) +// +// **NOTE: this method is written with the assumption that this is the only piece of code which will be changing the config of QueryThrottler** +func (qt *QueryThrottler) HandleConfigUpdate(srvks *topodatapb.SrvKeyspace, err error) bool { + // Handle topology errors using a hybrid approach: + // - Permanent errors (NoNode, context canceled): stop watching (return false) + // - Transient errors (network issues, etc.): keep watching (return true, auto-retry will reconnect) + if err != nil { + // Keyspace deleted from topology - stop watching + if topo.IsErrType(err, topo.NoNode) { + log.Warningf("HandleConfigUpdate: keyspace %s deleted or not found, stopping watch", qt.keyspace) + return false + } + + // Context canceled or interrupted - graceful shutdown, stop watching + if errors.Is(err, context.Canceled) || topo.IsErrType(err, topo.Interrupted) { + log.Infof("HandleConfigUpdate: watch stopped (context canceled or interrupted)") + return false + } + + // Transient error (network, temporary topo server issue) - keep watching + // The resilient watcher will automatically retry 
as defined in go/vt/srvtopo/resilient_server.go:46 + log.Warningf("HandleConfigUpdate: transient topo watch error (will retry): %v", err) + return true + } + + if srvks == nil { + log.Warningf("HandleConfigUpdate: srvks is nil") + return true + } + + // Get the query throttler configuration from the SrvKeyspace that the QueryThrottler uses to manage its throttling behavior. + iqtConfig := srvks.GetQueryThrottlerConfig() + newCfg := ConfigFromProto(iqtConfig) + + // If the config is not changed, return early. + if !isConfigUpdateRequired(qt.cfg, newCfg) { + return true + } + + // No Locking is required because only this function updates the configs of Query Throttler. + needsStrategyChange := qt.cfg.GetStrategyName() != newCfg.GetStrategyName() + oldStrategyInstance := qt.strategyHandlerInstance + + var newStrategy registry.ThrottlingStrategyHandler + if needsStrategyChange { + // Create the new strategy (doesn't need lock) + newStrategy = selectThrottlingStrategy(newCfg, qt.throttleClient, qt.tabletConfig) + } + + // Acquire write lock only for the actual swap operation + qt.mu.Lock() + if needsStrategyChange { + qt.strategyHandlerInstance = newStrategy + // Start a new strategy after assignment, still under lock for consistency. + if newStrategy != nil { + newStrategy.Start() + } + } + // Always update the configuration + qt.cfg = newCfg + qt.mu.Unlock() + + // Stop the old strategy (if needed) outside the lock to avoid blocking. + if needsStrategyChange && oldStrategyInstance != nil { + oldStrategyInstance.Stop() + } + + log.Infof("HandleConfigUpdate: config updated, strategy=%s, enabled=%v", newCfg.GetStrategyName(), newCfg.Enabled) + return true +} + // selectThrottlingStrategy returns the appropriate strategy implementation based on the config. 
func selectThrottlingStrategy(cfg Config, client *throttle.Client, tabletConfig *tabletenv.TabletConfig) registry.ThrottlingStrategyHandler { deps := registry.Deps{ @@ -171,47 +345,21 @@ func selectThrottlingStrategy(cfg Config, client *throttle.Client, tabletConfig return registry.CreateStrategy(cfg, deps) } -// startConfigRefreshLoop launches a background goroutine that refreshes the throttler's configuration -// at the interval specified by QueryThrottlerConfigRefreshInterval. -func (qt *QueryThrottler) startConfigRefreshLoop() { - go func() { - refreshInterval := qt.tabletConfig.QueryThrottlerConfigRefreshInterval - configRefreshTicker := time.NewTicker(refreshInterval) - defer configRefreshTicker.Stop() - - for { - select { - case <-qt.ctx.Done(): - return - case <-configRefreshTicker.C: - newCfg, err := qt.cfgLoader.Load(qt.ctx) - if err != nil { - log.Errorf("Error loading config: %v", err) - continue - } - - // Only restart strategy if the strategy type has changed - if qt.cfg.Strategy != newCfg.Strategy { - // Stop the current strategy before switching to a new one - if qt.strategy != nil { - qt.strategy.Stop() - } - - newStrategy := selectThrottlingStrategy(newCfg, qt.throttleClient, qt.tabletConfig) - // Update strategy and start the new one - qt.mu.Lock() - qt.strategy = newStrategy - qt.mu.Unlock() - if qt.strategy != nil { - qt.strategy.Start() - } - } - - // Always update the configuration - qt.mu.Lock() - qt.cfg = newCfg - qt.mu.Unlock() - } - } - }() +// isConfigUpdateRequired checks if the new config is different from the old config. +// This only checks for enabled, strategy name, and dry run because the strategy itself will update the strategy-specific config +// during runtime by having a separate watcher similar to the one used in QueryThrottler. 
+func isConfigUpdateRequired(oldCfg, newCfg Config) bool { + if oldCfg.Enabled != newCfg.Enabled { + return true + } + + if oldCfg.StrategyName != newCfg.StrategyName { + return true + } + + if oldCfg.DryRun != newCfg.DryRun { + return true + } + + return false } diff --git a/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go b/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go index f1b3094749f..6548b5f9f6e 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go +++ b/go/vt/vttablet/tabletserver/querythrottler/query_throttler_test.go @@ -18,7 +18,9 @@ package querythrottler import ( "context" + "errors" "fmt" + "sync" "testing" "time" @@ -26,6 +28,9 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/srvtopo/srvtopotest" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler/registry" @@ -37,43 +42,6 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" ) -func TestNewQueryThrottler_ConfigRefresh(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - config := &tabletenv.TabletConfig{ - QueryThrottlerConfigRefreshInterval: 10 * time.Millisecond, - } - env := tabletenv.NewEnv(vtenv.NewTestEnv(), config, "TestThrottler") - - throttler := &throttle.Throttler{} // use mock if needed - iqt := NewQueryThrottler(ctx, throttler, newFakeConfigLoader(Config{ - Enabled: true, - Strategy: registry.ThrottlingStrategyTabletThrottler, - }), env) - - // Assert initial state (should be NoOpStrategy) - require.NotNil(t, iqt) - iqt.mu.RLock() - initialStrategy := iqt.strategy - iqt.mu.RUnlock() - require.IsType(t, ®istry.NoOpStrategy{}, initialStrategy) - - require.Eventually(t, func() bool { - iqt.mu.RLock() - defer iqt.mu.RUnlock() - - // Assert updated cfg and strategy after config 
refresh - if !iqt.cfg.Enabled { - return false - } - if iqt.cfg.Strategy != registry.ThrottlingStrategyTabletThrottler { - return false - } - return true - }, 1*time.Second, 10*time.Millisecond, "Config should be refreshed and strategy should be updated") -} - func TestSelectThrottlingStrategy(t *testing.T) { tests := []struct { name string @@ -95,7 +63,7 @@ func TestSelectThrottlingStrategy(t *testing.T) { QueryThrottlerConfigRefreshInterval: 10 * time.Millisecond, } - strategy := selectThrottlingStrategy(Config{Enabled: true, Strategy: tt.giveThrottlingStrategy}, mockClient, config) + strategy := selectThrottlingStrategy(Config{Enabled: true, StrategyName: tt.giveThrottlingStrategy}, mockClient, config) require.IsType(t, tt.expectedType, strategy) }) @@ -114,20 +82,19 @@ func TestQueryThrottler_StrategyLifecycleManagement(t *testing.T) { } env := tabletenv.NewEnv(vtenv.NewTestEnv(), config, "TestThrottler") - iqt := NewQueryThrottler(ctx, throttler, newFakeConfigLoader(Config{ - Enabled: true, - Strategy: registry.ThrottlingStrategyTabletThrottler, - }), env) + srvTopoServer := srvtopotest.NewPassthroughSrvTopoServer() + + iqt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, srvTopoServer) // Verify initial strategy was started (NoOpStrategy in this case) - require.NotNil(t, iqt.strategy) + require.NotNil(t, iqt.strategyHandlerInstance) // Test Shutdown properly stops the strategy iqt.Shutdown() // After shutdown, the strategy should have been stopped // In a real test, we would verify the strategy's Stop method was called - require.NotNil(t, iqt.strategy) // Strategy reference should still exist but be stopped + require.NotNil(t, iqt.strategyHandlerInstance) // Strategy reference should still exist but be stopped } // TestQueryThrottler_Shutdown tests the Shutdown method. 
@@ -141,10 +108,9 @@ func TestQueryThrottler_Shutdown(t *testing.T) { env := tabletenv.NewEnv(vtenv.NewTestEnv(), config, "TestThrottler") throttler := &throttle.Throttler{} - iqt := NewQueryThrottler(ctx, throttler, newFakeConfigLoader(Config{ - Enabled: false, - Strategy: registry.ThrottlingStrategyTabletThrottler, - }), env) + srvTopoServer := srvtopotest.NewPassthroughSrvTopoServer() + + iqt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, srvTopoServer) // Should not panic when called multiple times iqt.Shutdown() @@ -152,7 +118,7 @@ func TestQueryThrottler_Shutdown(t *testing.T) { // Should still be able to check the strategy reference iqt.mu.RLock() - strategy := iqt.strategy + strategy := iqt.strategyHandlerInstance iqt.mu.RUnlock() require.NotNil(t, strategy) } @@ -259,7 +225,7 @@ func TestIncomingQueryThrottler_DryRunMode(t *testing.T) { Enabled: tt.enabled, DryRun: tt.dryRun, }, - strategy: mockStrategy, + strategyHandlerInstance: mockStrategy, } // Capture log output @@ -303,38 +269,6 @@ func TestIncomingQueryThrottler_DryRunMode(t *testing.T) { } } -// mockThrottlingStrategy is a test strategy that allows us to control throttling decisions -type mockThrottlingStrategy struct { - decision registry.ThrottleDecision - started bool - stopped bool -} - -func (m *mockThrottlingStrategy) Evaluate(ctx context.Context, targetTabletType topodatapb.TabletType, parsedQuery *sqlparser.ParsedQuery, transactionID int64, attrs registry.QueryAttributes) registry.ThrottleDecision { - return m.decision -} - -func (m *mockThrottlingStrategy) Start() { - m.started = true -} - -func (m *mockThrottlingStrategy) Stop() { - m.stopped = true -} - -func (m *mockThrottlingStrategy) GetStrategyName() string { - return "MockStrategy" -} - -// testLogCapture captures log output for testing -type testLogCapture struct { - logs []string -} - -func (lc *testLogCapture) captureLog(msg string, args ...interface{}) { - lc.logs = 
append(lc.logs, fmt.Sprintf(msg, args...)) -} - func TestQueryThrottler_extractWorkloadName(t *testing.T) { tests := []struct { name string @@ -453,3 +387,626 @@ func TestQueryThrottler_extractPriority(t *testing.T) { }) } } + +// TestQueryThrottler_HandleConfigUpdate_ErrorHandling verifies callback behavior for different error types. +func TestQueryThrottler_HandleConfigUpdate_ErrorHandling(t *testing.T) { + tests := []struct { + name string + inputErr error + expectedResult bool + description string + }{ + { + name: "ContextCanceledError", + inputErr: context.Canceled, + expectedResult: false, + description: "callback should return false to stop watching on context cancellation", + }, + { + name: "TransientTopoError", + inputErr: errors.New("topo error: transient error"), + expectedResult: true, + description: "callback should return true and continue watching on transient errors", + }, + { + name: "NoNodeError", + inputErr: topo.NewError(topo.NoNode, "keyspace/test_keyspace"), + expectedResult: false, + description: "callback should return false to stop watching when keyspace is deleted (NoNode)", + }, + { + name: "InterruptedError", + inputErr: topo.NewError(topo.Interrupted, "watch interrupted"), + expectedResult: false, + description: "callback should return false to stop watching on Interrupted error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + qt := &QueryThrottler{ + ctx: ctx, + keyspace: "test-keyspace", + cfg: Config{Enabled: true, StrategyName: registry.ThrottlingStrategyTabletThrottler}, + strategyHandlerInstance: ®istry.NoOpStrategy{}, + tabletConfig: &tabletenv.TabletConfig{}, + } + + // Create a valid SrvKeyspace matching the test setup (errors are checked before srvks is used) + srvks := createTestSrvKeyspace(true, registry.ThrottlingStrategyTabletThrottler, false) + + result := qt.HandleConfigUpdate(srvks, tt.inputErr) + + 
require.Equal(t, tt.expectedResult, result, tt.description) + }) + } +} + +// TestQueryThrottler_HandleConfigUpdate__ConfigExtraction verifies config is properly extracted from SrvKeyspace. +func TestQueryThrottler_HandleConfigUpdate__ConfigExtraction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + oldCfg := Config{Enabled: false, StrategyName: registry.ThrottlingStrategyTabletThrottler, DryRun: false} + oldStrategy := ®istry.NoOpStrategy{} + + qt := &QueryThrottler{ + ctx: ctx, + cfg: oldCfg, + strategyHandlerInstance: oldStrategy, + tabletConfig: &tabletenv.TabletConfig{}, + throttleClient: &throttle.Client{}, + } + + // Create SrvKeyspace with different config values + srvks := createTestSrvKeyspace(true, registry.ThrottlingStrategyTabletThrottler, true) + + result := qt.HandleConfigUpdate(srvks, nil) + + // Should return true to continue watching, config should be extracted from SrvKeyspace + require.True(t, result, "callback should return true and continue watching") + + qt.mu.RLock() + require.True(t, qt.cfg.Enabled, "Enabled should be updated from SrvKeyspace") + require.True(t, qt.cfg.DryRun, "DryRun should be updated from SrvKeyspace") + require.Equal(t, registry.ThrottlingStrategyTabletThrottler, qt.cfg.StrategyName, "strategy should remain TabletThrottler") + qt.mu.RUnlock() +} + +// TestQueryThrottler_HandleConfigUpdate__SuccessfulConfigUpdate tests successful config update when strategy doesn't change. 
+func TestQueryThrottler_HandleConfigUpdate__SuccessfulConfigUpdate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Use a mock strategy to track state changes + oldStrategy := &mockThrottlingStrategy{} + + // Both initial and new config have the same strategy TYPE (no swap expected) + unchangedStrategyType := registry.ThrottlingStrategyTabletThrottler + + qt := &QueryThrottler{ + ctx: ctx, + cfg: Config{Enabled: true, StrategyName: unchangedStrategyType, DryRun: false}, + strategyHandlerInstance: oldStrategy, + tabletConfig: &tabletenv.TabletConfig{}, + } + + // Create SrvKeyspace with same strategy but DryRun changed + srvks := createTestSrvKeyspace(true, unchangedStrategyType, true) + + result := qt.HandleConfigUpdate(srvks, nil) + + require.True(t, result, "callback should return true") + + qt.mu.RLock() + require.True(t, qt.cfg.DryRun, "DryRun config should be updated") + require.Equal(t, unchangedStrategyType, qt.cfg.GetStrategyName(), "strategy type should remain the same") + require.Equal(t, oldStrategy, qt.strategyHandlerInstance, "strategy instance should not change when type is same") + // Verify the old strategy was NOT stopped (no swap occurred) + require.False(t, oldStrategy.stopped, "old strategy should NOT be stopped when type doesn't change") + qt.mu.RUnlock() +} + +// TestQueryThrottler_HandleConfigUpdate__StrategySwitch tests that strategy is properly switched when strategy type changes. 
+func TestQueryThrottler_HandleConfigUpdate__StrategySwitch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + oldStrategy := &mockThrottlingStrategy{} + + qt := &QueryThrottler{ + ctx: ctx, + cfg: Config{Enabled: true, StrategyName: registry.ThrottlingStrategyTabletThrottler}, + strategyHandlerInstance: oldStrategy, + tabletConfig: &tabletenv.TabletConfig{}, + throttleClient: &throttle.Client{}, + } + + srvks := createTestSrvKeyspace(true, registry.ThrottlingStrategyUnknown, false) + + result := qt.HandleConfigUpdate(srvks, nil) + + // Strategy should be switched + require.True(t, result, "callback should return true") + + qt.mu.RLock() + require.Equal(t, registry.ThrottlingStrategyUnknown, qt.cfg.GetStrategyName(), "config strategy should be updated") + // Old strategy should have been stopped (mocked strategy tracks this) + require.True(t, oldStrategy.stopped, "old strategy should be stopped") + // New strategy should be different instance + newStrategyInstance := qt.strategyHandlerInstance + qt.mu.RUnlock() + + require.NotEqual(t, fmt.Sprintf("%p", oldStrategy), fmt.Sprintf("%p", newStrategyInstance), + "strategy instance should be different after type change") +} + +// TestQueryThrottler_HandleConfigUpdate__NoChange tests that nothing changes when the config is identical. 
+func TestQueryThrottler_HandleConfigUpdate__NoChange(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + unchangedCfg := Config{Enabled: true, StrategyName: registry.ThrottlingStrategyTabletThrottler, DryRun: false} + oldStrategy := &registry.NoOpStrategy{} + + qt := &QueryThrottler{ + ctx: ctx, + cfg: unchangedCfg, + strategyHandlerInstance: oldStrategy, + tabletConfig: &tabletenv.TabletConfig{}, + } + + // Create SrvKeyspace with identical config + srvks := createTestSrvKeyspace(true, registry.ThrottlingStrategyTabletThrottler, false) + + result := qt.HandleConfigUpdate(srvks, nil) + + // Config and strategy should remain same + require.True(t, result, "callback should return true") + + qt.mu.RLock() + require.Equal(t, unchangedCfg, qt.cfg, "config should remain unchanged") + require.Equal(t, oldStrategy, qt.strategyHandlerInstance, "strategy should remain unchanged") + qt.mu.RUnlock() +} + +// TestIsConfigUpdateRequired tests the isConfigUpdateRequired function. 
+func TestIsConfigUpdateRequired(t *testing.T) { + tests := []struct { + name string + oldCfg Config + newCfg Config + expected bool + }{ + { + name: "No changes - configs identical", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + expected: false, + }, + { + name: "Enabled changed from true to false", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: false, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + expected: true, + }, + { + name: "Enabled changed from false to true", + oldCfg: Config{ + Enabled: false, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + expected: true, + }, + { + name: "DryRun changed from false to true", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: true, + }, + expected: true, + }, + { + name: "DryRun changed from true to false", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: true, + }, + newCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + expected: true, + }, + { + name: "Multiple fields changed - Enabled and DryRun", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: false, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: true, + }, + expected: true, + }, + { + name: 
"Multiple fields changed - Enabled and StrategyName", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: false, + StrategyName: registry.ThrottlingStrategyUnknown, + DryRun: false, + }, + expected: true, + }, + { + name: "Multiple fields changed - StrategyName and DryRun", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyUnknown, + DryRun: true, + }, + expected: true, + }, + { + name: "All three fields changed", + oldCfg: Config{ + Enabled: true, + StrategyName: registry.ThrottlingStrategyTabletThrottler, + DryRun: false, + }, + newCfg: Config{ + Enabled: false, + StrategyName: registry.ThrottlingStrategyUnknown, + DryRun: true, + }, + expected: true, + }, + { + name: "All fields false/empty - no change", + oldCfg: Config{ + Enabled: false, + StrategyName: "", + DryRun: false, + }, + newCfg: Config{ + Enabled: false, + StrategyName: "", + DryRun: false, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isConfigUpdateRequired(tt.oldCfg, tt.newCfg) + require.Equal(t, tt.expected, result) + }) + } +} + +// TestQueryThrottler_startSrvKeyspaceWatch_InitialLoad tests that initial configuration is loaded successfully when GetSrvKeyspace succeeds. 
+func TestQueryThrottler_startSrvKeyspaceWatch_InitialLoad(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := tabletenv.NewEnv(vtenv.NewTestEnv(), &tabletenv.TabletConfig{}, "TestThrottler") + + srvTopoServer := srvtopotest.NewPassthroughSrvTopoServer() + srvTopoServer.SrvKeyspace = createTestSrvKeyspace(true, registry.ThrottlingStrategyTabletThrottler, false) + srvTopoServer.SrvKeyspaceError = nil + + throttler := &throttle.Throttler{} + qt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, srvTopoServer) + + qt.InitDBConfig("test_keyspace") + + // Verify watch was started + require.Eventually(t, func() bool { + return qt.watchStarted.Load() + }, 2*time.Second, 10*time.Millisecond, "Watch should have been started") + + // Verify that the configuration was loaded correctly + require.Eventually(t, func() bool { + qt.mu.RLock() + defer qt.mu.RUnlock() + return qt.cfg.Enabled && + qt.cfg.StrategyName == registry.ThrottlingStrategyTabletThrottler && + !qt.cfg.DryRun + }, 2*time.Second, 10*time.Millisecond, "Config should be loaded correctly: enabled=true, strategy=TabletThrottler, dryRun=false") + + require.Equal(t, "test_keyspace", qt.keyspace, "Keyspace should be set correctly") +} + +// TestQueryThrottler_startSrvKeyspaceWatch_InitialLoadFailure tests that watch starts even when initial GetSrvKeyspace fails. 
+func TestQueryThrottler_startSrvKeyspaceWatch_InitialLoadFailure(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := tabletenv.NewEnv(vtenv.NewTestEnv(), &tabletenv.TabletConfig{}, "TestThrottler") + + // Configure PassthroughSrvTopoServer to return an error on GetSrvKeyspace + srvTopoServer := srvtopotest.NewPassthroughSrvTopoServer() + srvTopoServer.SrvKeyspace = nil + srvTopoServer.SrvKeyspaceError = fmt.Errorf("failed to fetch keyspace") + + throttler := &throttle.Throttler{} + qt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, srvTopoServer) + + // Initialize with keyspace to trigger startSrvKeyspaceWatch + qt.InitDBConfig("test_keyspace") + + // Verify watch was started despite initial load failure + require.Eventually(t, func() bool { + return qt.watchStarted.Load() + }, 2*time.Second, 10*time.Millisecond, "Watch should be started even if initial load fails") + + require.Equal(t, "test_keyspace", qt.keyspace, "Keyspace should be set correctly") + + // Configuration should remain at default (NoOpStrategy) due to failure + require.Eventually(t, func() bool { + qt.mu.RLock() + defer qt.mu.RUnlock() + return !qt.cfg.Enabled + }, 2*time.Second, 10*time.Millisecond, "Config should remain disabled after initial load failure") +} + +// TestQueryThrottler_startSrvKeyspaceWatch_OnlyStartsOnce tests that watch only starts once even with concurrent calls (atomic flag protection). 
+func TestQueryThrottler_startSrvKeyspaceWatch_OnlyStartsOnce(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := tabletenv.NewEnv(vtenv.NewTestEnv(), &tabletenv.TabletConfig{}, "TestThrottler") + + srvTopoServer := srvtopotest.NewPassthroughSrvTopoServer() + srvTopoServer.SrvKeyspace = createTestSrvKeyspace(true, registry.ThrottlingStrategyTabletThrottler, false) + srvTopoServer.SrvKeyspaceError = nil + + throttler := &throttle.Throttler{} + qt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, srvTopoServer) + + qt.InitDBConfig("test_keyspace") + + // Attempt to start the watch multiple times concurrently + const numGoroutines = 10 + startedCount := 0 + var wg sync.WaitGroup + var mu sync.Mutex + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // Each goroutine tries to start the watch + qt.startSrvKeyspaceWatch() + mu.Lock() + startedCount++ + mu.Unlock() + }() + } + + // Wait for all goroutines to complete + wg.Wait() + + // Verify that the watch was started exactly once (atomic flag prevents multiple starts) + require.Eventually(t, func() bool { + return qt.watchStarted.Load() + }, 2*time.Second, 10*time.Millisecond, "Watch should have been started") + + require.Equal(t, numGoroutines, startedCount, "All goroutines should have called startSrvKeyspaceWatch") +} + +// TestQueryThrottler_startSrvKeyspaceWatch_RequiredFieldsValidation tests that watch doesn't start when required fields are missing. 
+func TestQueryThrottler_startSrvKeyspaceWatch_RequiredFieldsValidation(t *testing.T) { + tests := []struct { + name string + srvTopoServer srvtopo.Server + keyspace string + expectedWatchFlag bool + description string + }{ + { + name: "Nil srvTopoServer prevents watch start", + srvTopoServer: nil, + keyspace: "test_keyspace", + expectedWatchFlag: false, + description: "Watch should not start when srvTopoServer is nil", + }, + { + name: "Empty keyspace prevents watch start", + srvTopoServer: srvtopotest.NewPassthroughSrvTopoServer(), + keyspace: "", + expectedWatchFlag: false, + description: "Watch should not start when keyspace is empty", + }, + { + name: "Valid fields allow watch to start", + srvTopoServer: srvtopotest.NewPassthroughSrvTopoServer(), + keyspace: "test_keyspace", + expectedWatchFlag: true, + description: "Watch should start when all required fields are valid", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := tabletenv.NewEnv(vtenv.NewTestEnv(), &tabletenv.TabletConfig{}, "TestThrottler") + + throttler := &throttle.Throttler{} + qt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, tt.srvTopoServer) + + qt.InitDBConfig(tt.keyspace) + + qt.startSrvKeyspaceWatch() + + if tt.expectedWatchFlag { + require.Eventually(t, func() bool { + return qt.watchStarted.Load() + }, 2*time.Second, 10*time.Millisecond, tt.description) + } else { + // For negative cases, ensure the watch doesn't start within a reasonable time + require.Never(t, func() bool { + return qt.watchStarted.Load() + }, 500*time.Millisecond, 10*time.Millisecond, tt.description) + } + }) + } +} + +// TestQueryThrottler_startSrvKeyspaceWatch_WatchCallback tests that WatchSrvKeyspace callback receives config updates and HandleConfigUpdate is invoked correctly. 
+func TestQueryThrottler_startSrvKeyspaceWatch_WatchCallback(t *testing.T) { + tests := []struct { + name string + enabled bool + strategy registry.ThrottlingStrategy + dryRun bool + expectedEnabled bool + expectedStrategy registry.ThrottlingStrategy + expectedDryRun bool + }{ + { + name: "TabletThrottler strategy with enabled and no dry-run", + enabled: true, + strategy: registry.ThrottlingStrategyTabletThrottler, + dryRun: false, + expectedEnabled: true, + expectedStrategy: registry.ThrottlingStrategyTabletThrottler, + expectedDryRun: false, + }, + { + name: "TabletThrottler disabled with dry-run", + enabled: false, + strategy: registry.ThrottlingStrategyTabletThrottler, + dryRun: true, + expectedEnabled: false, + expectedStrategy: registry.ThrottlingStrategyTabletThrottler, + expectedDryRun: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := tabletenv.NewEnv(vtenv.NewTestEnv(), &tabletenv.TabletConfig{}, "TestThrottler") + + srvTopoServer := srvtopotest.NewPassthroughSrvTopoServer() + srvTopoServer.SrvKeyspace = createTestSrvKeyspace(tt.enabled, tt.strategy, tt.dryRun) + srvTopoServer.SrvKeyspaceError = nil + + throttler := &throttle.Throttler{} + qt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, srvTopoServer) + + qt.InitDBConfig("test_keyspace") + + // Verify watch was started + require.Eventually(t, func() bool { + return qt.watchStarted.Load() + }, 2*time.Second, 10*time.Millisecond, "Watch should have been started") + + // Verify that HandleConfigUpdate was called by checking if the config was updated + require.Eventually(t, func() bool { + qt.mu.RLock() + defer qt.mu.RUnlock() + return qt.cfg.Enabled == tt.expectedEnabled && + qt.cfg.StrategyName == tt.expectedStrategy && + qt.cfg.DryRun == tt.expectedDryRun + }, 2*time.Second, 10*time.Millisecond, "Config should be updated correctly 
after callback is invoked") + + }) + } +} + +// TestQueryThrottler_startSrvKeyspaceWatch_ShutdownStopsWatch tests that Shutdown properly cancels the watch context and stops the watch goroutine. +func TestQueryThrottler_startSrvKeyspaceWatch_ShutdownStopsWatch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := tabletenv.NewEnv(vtenv.NewTestEnv(), &tabletenv.TabletConfig{}, "TestThrottler") + + srvTopoServer := srvtopotest.NewPassthroughSrvTopoServer() + srvTopoServer.SrvKeyspace = createTestSrvKeyspace(true, registry.ThrottlingStrategyTabletThrottler, false) + srvTopoServer.SrvKeyspaceError = nil + + throttler := &throttle.Throttler{} + qt := NewQueryThrottler(ctx, throttler, env, &topodatapb.TabletAlias{Cell: "test-cell", Uid: uint32(123)}, srvTopoServer) + + qt.InitDBConfig("test_keyspace") + + // Verify watch was started + require.Eventually(t, func() bool { + return qt.watchStarted.Load() + }, 2*time.Second, 10*time.Millisecond, "Watch should have been started before shutdown") + + require.NotNil(t, qt.cancelWatchContext, "Cancel function should be set before shutdown") + + // Call Shutdown to stop the watch + qt.Shutdown() + + // Verify that the watch started flag is reset + require.Eventually(t, func() bool { + return !qt.watchStarted.Load() + }, 2*time.Second, 10*time.Millisecond, "Watch should be marked as not started after shutdown") + + // Verify that the strategy was stopped + qt.mu.RLock() + strategyInstance := qt.strategyHandlerInstance + qt.mu.RUnlock() + require.NotNil(t, strategyInstance, "Strategy instance should still exist after shutdown") + + // Call Shutdown again to ensure it doesn't panic + qt.Shutdown() + + // Verify the watch flag remains false + require.False(t, qt.watchStarted.Load(), "Watch should remain not started after multiple shutdowns") +} diff --git a/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go b/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go 
index 02ea175cd67..686d1521f69 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go +++ b/go/vt/vttablet/tabletserver/querythrottler/registry/registry.go @@ -60,15 +60,15 @@ func CreateStrategy(cfg StrategyConfig, deps Deps) ThrottlingStrategyHandler { // The design intent is: // Every “real” strategy must self-register to opt-in. // NoOpStrategy must always be available—even before any registration happens—so the registry itself can safely fall back on it. - factory, ok := Get(cfg.GetStrategy()) + factory, ok := Get(cfg.GetStrategyName()) if !ok { - log.Warningf("Unknown strategy %s, using NoOp", cfg.GetStrategy()) + log.Warningf("Unknown strategy %s, using NoOp", cfg.GetStrategyName()) return &NoOpStrategy{} } strategy, err := factory.New(deps, cfg) if err != nil { - log.Errorf("Strategy %s failed to init: %v, using NoOp", cfg.GetStrategy(), err) + log.Errorf("Strategy %s failed to init: %v, using NoOp", cfg.GetStrategyName(), err) return &NoOpStrategy{} } diff --git a/go/vt/vttablet/tabletserver/querythrottler/registry/registry_test.go b/go/vt/vttablet/tabletserver/querythrottler/registry/registry_test.go index 149c0bd33d2..ac3e205d9b5 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/registry/registry_test.go +++ b/go/vt/vttablet/tabletserver/querythrottler/registry/registry_test.go @@ -76,7 +76,7 @@ type testConfig struct { strategy ThrottlingStrategy } -func (c testConfig) GetStrategy() ThrottlingStrategy { +func (c testConfig) GetStrategyName() ThrottlingStrategy { return c.strategy } diff --git a/go/vt/vttablet/tabletserver/querythrottler/registry/types.go b/go/vt/vttablet/tabletserver/querythrottler/registry/types.go index 69b5d36079b..9343f8f346a 100644 --- a/go/vt/vttablet/tabletserver/querythrottler/registry/types.go +++ b/go/vt/vttablet/tabletserver/querythrottler/registry/types.go @@ -57,7 +57,7 @@ type ThrottleDecision struct { // StrategyConfig defines the configuration interface that strategy implementations // must 
satisfy. This avoids circular imports by using a generic interface. type StrategyConfig interface { - GetStrategy() ThrottlingStrategy + GetStrategyName() ThrottlingStrategy } // Deps holds the dependencies required by strategy factories. diff --git a/go/vt/vttablet/tabletserver/querythrottler/test_utils.go b/go/vt/vttablet/tabletserver/querythrottler/test_utils.go new file mode 100644 index 00000000000..9161c046d8b --- /dev/null +++ b/go/vt/vttablet/tabletserver/querythrottler/test_utils.go @@ -0,0 +1,78 @@ +/* +Copyright 2025 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package querythrottler + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/vt/proto/querythrottler" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/querythrottler/registry" +) + +// createTestSrvKeyspace creates a SrvKeyspace with query throttler config for testing +func createTestSrvKeyspace(enabled bool, strategy registry.ThrottlingStrategy, dryRun bool) *topodatapb.SrvKeyspace { + var protoStrategy querythrottler.ThrottlingStrategy + switch strategy { + case registry.ThrottlingStrategyTabletThrottler: + protoStrategy = querythrottler.ThrottlingStrategy_TABLET_THROTTLER + default: + protoStrategy = querythrottler.ThrottlingStrategy_UNKNOWN + } + + return &topodatapb.SrvKeyspace{ + QueryThrottlerConfig: &querythrottler.Config{ + Enabled: enabled, + Strategy: protoStrategy, + DryRun: dryRun, + }, + } +} + +// mockThrottlingStrategy is a test strategy that allows us to control throttling decisions +type mockThrottlingStrategy struct { + decision registry.ThrottleDecision + started bool + stopped bool +} + +func (m *mockThrottlingStrategy) Evaluate(ctx context.Context, targetTabletType topodatapb.TabletType, parsedQuery *sqlparser.ParsedQuery, transactionID int64, attrs registry.QueryAttributes) registry.ThrottleDecision { + return m.decision +} + +func (m *mockThrottlingStrategy) Start() { + m.started = true +} + +func (m *mockThrottlingStrategy) Stop() { + m.stopped = true +} + +func (m *mockThrottlingStrategy) GetStrategyName() string { + return "MockStrategy" +} + +// testLogCapture captures log output for testing +type testLogCapture struct { + logs []string +} + +func (lc *testLogCapture) captureLog(msg string, args ...interface{}) { + lc.logs = append(lc.logs, fmt.Sprintf(msg, args...)) +} diff --git a/go/vt/vttablet/tabletserver/querythrottler/test_wrappers.go b/go/vt/vttablet/tabletserver/querythrottler/test_wrappers.go deleted file mode 100644 index 
85f74eb6080..00000000000 --- a/go/vt/vttablet/tabletserver/querythrottler/test_wrappers.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2025 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package querythrottler - -import "context" - -// fakeConfigLoader is a test fake that implements ConfigLoader. -type fakeConfigLoader struct { - giveConfig Config -} - -// newFakeConfigLoader creates a fake config loader -// with a fully constructed Config. -func newFakeConfigLoader(cfg Config) *fakeConfigLoader { - return &fakeConfigLoader{ - giveConfig: cfg, - } -} - -// Load implements the ConfigLoader interface. 
-func (f *fakeConfigLoader) Load(ctx context.Context) (Config, error) { - return f.giveConfig, nil -} diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 12a33bad575..fff22948d77 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -190,7 +190,7 @@ func NewTabletServer(ctx context.Context, env *vtenv.Environment, name string, c tsv.rt = repltracker.NewReplTracker(tsv, alias) tsv.lagThrottler = throttle.NewThrottler(tsv, srvTopoServer, topoServer, alias, tsv.rt.HeartbeatWriter(), tabletTypeFunc, throttlerPoolName) tsv.qThrottler = throttle.NewThrottler(tsv, srvTopoServer, topoServer, alias, tsv.rt.HeartbeatWriter(), tabletTypeFunc, queryThrottlerPoolName) - tsv.queryThrottler = querythrottler.NewQueryThrottler(ctx, tsv.qThrottler, querythrottler.NewFileBasedConfigLoader(), tsv) + tsv.queryThrottler = querythrottler.NewQueryThrottler(ctx, tsv.qThrottler, tsv, alias, srvTopoServer) tsv.vstreamer = vstreamer.NewEngine(tsv, srvTopoServer, tsv.se, tsv.lagThrottler, alias.Cell) tsv.tracker = schema.NewTracker(tsv, tsv.vstreamer, tsv.se) @@ -327,6 +327,8 @@ func (tsv *TabletServer) InitDBConfig(target *querypb.Target, dbcfgs *dbconfigs. tsv.lagThrottler.InitDBConfig(target.Keyspace, target.Shard) tsv.qThrottler.InitDBConfig(target.Keyspace, target.Shard) tsv.tableGC.InitDBConfig(target.Keyspace, target.Shard, dbcfgs.DBName) + tsv.queryThrottler.InitDBConfig(target.Keyspace) + return nil } diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go index 3c8e647046e..c221478d82f 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go @@ -415,10 +415,10 @@ func (uvs *uvstreamer) currentPosition() (replication.Position, error) { // 3. TablePKs not nil, startPos empty => table copy (for pks > lastPK) // 4. 
TablePKs not nil, startPos set => run catchup from startPos, then table copy (for pks > lastPK) // -// If TablesToCopy option is not nil, copy only the tables listed in TablesToCopy. -// For other tables not in TablesToCopy, if startPos is set, perform catchup starting from startPos. +// If table copy phase should run based on one of the previous states, then only copy the tables in +// TablesToCopy list. func (uvs *uvstreamer) init() error { - if uvs.startPos == "" /* full copy */ || len(uvs.inTablePKs) > 0 /* resume copy */ || len(uvs.options.GetTablesToCopy()) > 0 /* copy specific tables */ { + if uvs.startPos == "" /* full copy */ || len(uvs.inTablePKs) > 0 /* resume copy */ { if err := uvs.buildTablePlan(); err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go index ed31b3080f3..c90cb2d7ad7 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -2381,3 +2381,30 @@ func TestFilteredIsNullOperator(t *testing.T) { }) } } + +func TestUVStreamerNoCopyWithGTID(t *testing.T) { + execStatements(t, []string{ + "create table t1(id int, val varchar(128), primary key(id))", + "insert into t1 values (1, 'val1')", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + ctx := context.Background() + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }}, + } + pos := primaryPosition(t) + options := &binlogdatapb.VStreamOptions{ + TablesToCopy: []string{"t1"}, + } + uvs := newUVStreamer(ctx, engine, env.Dbcfgs.DbaWithDB(), env.SchemaEngine, pos, + nil, filter, testLocalVSchema, throttlerapp.VStreamerName, + func([]*binlogdatapb.VEvent) error { return nil }, options) + err := uvs.init() + require.NoError(t, err) + require.Empty(t, uvs.plans, "Should not build table plans when startPos is set") +} diff --git 
a/go/vt/vttablet/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go index f1037822c74..5a61f5d3d1d 100644 --- a/go/vt/vttablet/tmclient/rpc_client_api.go +++ b/go/vt/vttablet/tmclient/rpc_client_api.go @@ -254,7 +254,7 @@ type TabletManagerClient interface { // DemotePrimary tells the soon-to-be-former primary it's going to change, // and it should go read-only and return its current position. - DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) + DemotePrimary(ctx context.Context, tablet *topodatapb.Tablet, force bool) (*replicationdatapb.PrimaryStatus, error) // UndoDemotePrimary reverts all changes made by DemotePrimary // To be used if we are unable to promote the chosen new primary diff --git a/go/vt/vttablet/tmrpctest/test_tm_rpc.go b/go/vt/vttablet/tmrpctest/test_tm_rpc.go index 3de8403f058..46f42245607 100644 --- a/go/vt/vttablet/tmrpctest/test_tm_rpc.go +++ b/go/vt/vttablet/tmrpctest/test_tm_rpc.go @@ -1266,7 +1266,7 @@ func tmRPCTestInitReplicaPanic(ctx context.Context, t *testing.T, client tmclien expectHandleRPCPanic(t, "InitReplica", true /*verbose*/, err) } -func (fra *fakeRPCTM) DemotePrimary(ctx context.Context) (*replicationdatapb.PrimaryStatus, error) { +func (fra *fakeRPCTM) DemotePrimary(ctx context.Context, force bool) (*replicationdatapb.PrimaryStatus, error) { if fra.panics { panic(errors.New("test-triggered panic")) } @@ -1274,12 +1274,12 @@ func (fra *fakeRPCTM) DemotePrimary(ctx context.Context) (*replicationdatapb.Pri } func tmRPCTestDemotePrimary(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { - PrimaryStatus, err := client.DemotePrimary(ctx, tablet) + PrimaryStatus, err := client.DemotePrimary(ctx, tablet, false) compareError(t, "DemotePrimary", err, PrimaryStatus.Position, testPrimaryStatus.Position) } func tmRPCTestDemotePrimaryPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet 
*topodatapb.Tablet) { - _, err := client.DemotePrimary(ctx, tablet) + _, err := client.DemotePrimary(ctx, tablet, false) expectHandleRPCPanic(t, "DemotePrimary", true /*verbose*/, err) } diff --git a/go/vt/vttest/environment.go b/go/vt/vttest/environment.go index df27e6928b5..2b3744574a3 100644 --- a/go/vt/vttest/environment.go +++ b/go/vt/vttest/environment.go @@ -21,8 +21,10 @@ import ( "net" "os" "path" + "slices" "strconv" "strings" + "sync" "vitess.io/vitess/go/vt/proto/vttest" @@ -30,6 +32,14 @@ import ( _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" ) +var ( + // randomPortsMu provides synchronization for randomPorts(). + randomPortsMu sync.Mutex + + // usedRandomPorts stores ports that have been used by remotePort(). + usedRandomPorts []int +) + // Environment is the interface that customizes the global settings for // the test cluster. Usually the same environment settings are shared by // all the LocalCluster instances in a given test suite, with each instance @@ -235,21 +245,31 @@ func tmpdir(dataroot string) (dir string, err error) { // randomPort gets a random port that is available for a TCP connection. // After we generate a random port, we try to establish tcp connections on it and the next 5 values. // If any of them fail, then we try a different port. 
-func randomPort() int { +func randomPort() (port int) { + randomPortsMu.Lock() + defer randomPortsMu.Unlock() for { - port := int(rand.Int32N(20000) + 10000) + portBase := int(rand.Int32N(20000) + 10000) portInUse := false + portRange := make([]int, 0, 6) for i := 0; i < 6; i++ { - ln, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(port+i))) + port = portBase + i + if slices.Contains(usedRandomPorts, port) { + portInUse = true + break + } + ln, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(port))) if err != nil { portInUse = true break } + portRange = append(portRange, port) ln.Close() } if portInUse { continue } + usedRandomPorts = append(usedRandomPorts, portRange...) return port } } diff --git a/go/vt/vttest/environment_test.go b/go/vt/vttest/environment_test.go index 1b6e9046130..dbaa5199892 100644 --- a/go/vt/vttest/environment_test.go +++ b/go/vt/vttest/environment_test.go @@ -44,3 +44,15 @@ func TestVtcomboArguments(t *testing.T) { assert.ElementsMatch(t, expectedServiceList, serviceMapList, "--service-map list does not contain expected vtcombo services") }) } + +func TestVtcomboRandomPort(t *testing.T) { + require.Empty(t, usedRandomPorts) + port := randomPort() + // 10000-30000 is the range the rand call in randomPorts() can return + require.GreaterOrEqual(t, port, 10000) + require.LessOrEqual(t, port, 30000) + require.Len(t, usedRandomPorts, 6) + require.Contains(t, usedRandomPorts, port) + require.NotEqual(t, port, randomPort()) + require.Len(t, usedRandomPorts, 12) +} diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index 388bd1ea3f9..44589db3b62 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -23,11 +23,13 @@ import ( "errors" "fmt" "io" + "net" "net/http" "os" "os/exec" "path" "path/filepath" + "strconv" "strings" "time" "unicode" @@ -158,6 +160,9 @@ type Config struct { VtgateTabletRefreshInterval time.Duration + // Gateway initial tablet timeout - 
how long VTGate waits for tablets at startup + VtgateGatewayInitialTabletTimeout time.Duration + // Set the planner to fail on scatter queries NoScatter bool } @@ -800,7 +805,7 @@ func (db *LocalCluster) VTProcess() *VtProcess { // a pointer to the interface. To read this vschema, the caller must convert it to a map func (vt *VtProcess) ReadVSchema() (*interface{}, error) { httpClient := &http.Client{Timeout: 5 * time.Second} - resp, err := httpClient.Get(fmt.Sprintf("http://%s:%d/debug/vschema", vt.BindAddress, vt.Port)) + resp, err := httpClient.Get("http://" + net.JoinHostPort(vt.BindAddress, strconv.Itoa(vt.Port)) + "/debug/vschema") if err != nil { return nil, err } diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index 418f52187ae..b29e713ad2b 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -279,6 +279,14 @@ func VtcomboProcess(environment Environment, args *Config, mysql MySQLManager) ( vt.ExtraArgs = append(vt.ExtraArgs, fmt.Sprintf("--tablet-refresh-interval=%v", args.VtgateTabletRefreshInterval)) } + // If gateway initial tablet timeout is not defined then we will give it value of 30s (vtcombo default). + // Setting it to a lower value will reduce the time VTGate waits for tablets at startup. + if args.VtgateGatewayInitialTabletTimeout <= 0 { + vt.ExtraArgs = append(vt.ExtraArgs, fmt.Sprintf("--gateway-initial-tablet-timeout=%v", 30*time.Second)) + } else { + vt.ExtraArgs = append(vt.ExtraArgs, fmt.Sprintf("--gateway-initial-tablet-timeout=%v", args.VtgateGatewayInitialTabletTimeout)) + } + vt.ExtraArgs = append(vt.ExtraArgs, QueryServerArgs...) vt.ExtraArgs = append(vt.ExtraArgs, environment.VtcomboArguments()...) 
diff --git a/proto/querythrottler.proto b/proto/querythrottler.proto new file mode 100644 index 00000000000..71dc83fa399 --- /dev/null +++ b/proto/querythrottler.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; + +package querythrottler; + +option go_package = "vitess.io/vitess/go/vt/proto/querythrottler"; + +// ThrottlingStrategy represents the strategy used to apply throttling +enum ThrottlingStrategy { + UNKNOWN = 0; + TABLET_THROTTLER = 1; +} + +// Config defines the runtime configuration for the IncomingQueryThrottler +message Config { + bool enabled = 1; + ThrottlingStrategy strategy = 2; + TabletStrategyConfig tablet_strategy_config = 3; + bool dry_run = 4; +} + +// TabletStrategyConfig holds per-tablet-type throttling rules +message TabletStrategyConfig { + map<string, StatementRuleSet> tablet_rules = 1; +} + +// StatementRuleSet maps SQL statement types to metric rules +message StatementRuleSet { + map<string, MetricRuleSet> statement_rules = 1; +} + +// MetricRuleSet maps metric names to their throttling rules +message MetricRuleSet { + map<string, MetricRule> metric_rules = 1; +} + +// MetricRule defines how to throttle based on a specific metric +message MetricRule { + repeated ThrottleThreshold thresholds = 1; +} + +// ThrottleThreshold defines a condition for throttling +message ThrottleThreshold { + double above = 1; + int32 throttle = 2; +} diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto index fc8acdca8a5..e46bc63dbd4 100644 --- a/proto/tabletmanagerdata.proto +++ b/proto/tabletmanagerdata.proto @@ -485,6 +485,7 @@ message InitReplicaResponse { } message DemotePrimaryRequest { + bool force = 1; } message DemotePrimaryResponse { diff --git a/proto/topodata.proto b/proto/topodata.proto index 611ebf55b43..efb05acd146 100644 --- a/proto/topodata.proto +++ b/proto/topodata.proto @@ -28,6 +28,8 @@ package topodata; import "vtorcdata.proto"; import "vttime.proto"; +import "querythrottler.proto"; + // KeyRange describes a range of sharding keys, when range-based // sharding is used. 
@@ -305,6 +307,9 @@ message Keyspace { // Vtorc is the vtorc keyspace config/state for the keyspace. vtorcdata.Keyspace vtorc_state = 11; + + // QueryThrottler provides a flexible throttling configuration that supports multiple throttling strategies beyond the standard tablet throttling. + querythrottler.Config query_throttler_config = 20000; } // ShardReplication describes the MySQL replication relationships @@ -437,6 +442,10 @@ message SrvKeyspace { // shards and tablets. This is copied from the global keyspace // object. ThrottlerConfig throttler_config = 6; + + // QueryThrottler provides a flexible throttling configuration that supports multiple throttling strategies beyond the standard tablet throttling. + querythrottler.Config query_throttler_config = 20000; + } // CellInfo contains information about a cell. CellInfo objects are diff --git a/test.go b/test.go index d784e434267..76247ac4216 100755 --- a/test.go +++ b/test.go @@ -114,7 +114,7 @@ const ( configFileName = "test/config.json" // List of flavors for which a bootstrap Docker image is available. - flavors = "mysql80,mysql84,percona80" + flavors = "mysql80,mysql84,percona80,percona84" ) // Config is the overall object serialized in test/config.json. 
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index 5e576d65a62..a471d379713 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -18,12 +18,17 @@ package main import ( "bytes" + "context" + "errors" "fmt" "log" "os" "path" "strings" "text/template" + "time" + + "github.com/google/go-github/v76/github" ) type mysqlVersion string @@ -46,11 +51,20 @@ var ( unitTestDatabases = []mysqlVersion{mysql57, mysql80, mysql84} ) +var ( + ghClient = github.NewClient(nil) + ghClientTimeout = time.Second * 10 + goJunitReportSHA string +) + const ( oracleCloudRunner = "oracle-vm-16cpu-64gb-x86-64" githubRunner = "gh-hosted-runners-16cores-1-24.04" cores16RunnerName = oracleCloudRunner defaultRunnerName = "ubuntu-24.04" + + githubOrg = "vitessio" + goJunitReportRepo = "go-junit-report" ) // To support a private git repository, set goPrivate to a repo in @@ -178,7 +192,7 @@ var ( ) type unitTest struct { - Name, RunsOn, Platform, FileName, GoPrivate, Evalengine string + Name, RunsOn, Platform, FileName, GoPrivate, GoJunitReportSHA, Evalengine string } type clusterTest struct { @@ -187,6 +201,7 @@ type clusterTest struct { BuildTag string RunsOn string GoPrivate string + GoJunitReportSHA string MemoryCheck bool MakeTools, InstallXtraBackup bool Docker bool @@ -198,11 +213,12 @@ type clusterTest struct { } type vitessTesterTest struct { - FileName string - Name string - RunsOn string - GoPrivate string - Path string + FileName string + Name string + RunsOn string + GoPrivate string + GoJunitReportSHA string + Path string } // clusterMySQLVersions return list of mysql versions (one or more) that this cluster needs to test against @@ -239,6 +255,12 @@ func mergeBlankLines(buf *bytes.Buffer) string { } func main() { + var err error + goJunitReportSHA, err = getRepoHeadSHA1(githubOrg, goJunitReportRepo) + if err != nil { + log.Fatalf("failed to get HEAD SHA1 of %s/%s: %v", githubOrg, goJunitReportRepo, err) + } + generateUnitTestWorkflows() 
generateVitessTesterWorkflows(vitessTesterMap, clusterVitessTesterTemplate) generateClusterWorkflows(clusterList, clusterTestTemplate) @@ -255,13 +277,24 @@ func canonnizeList(list []string) []string { return output } +func getRepoHeadSHA1(owner, repo string) (string, error) { + if ghClient == nil || ghClient.Repositories == nil { + return "", errors.New("invalid github client") + } + ctx, cancel := context.WithTimeout(context.Background(), ghClientTimeout) + defer cancel() + sha, _, err := ghClient.Repositories.GetCommitSHA1(ctx, owner, repo, "HEAD", "") + return sha, err +} + func generateVitessTesterWorkflows(mp map[string]string, tpl string) { for test, testPath := range mp { tt := &vitessTesterTest{ - Name: fmt.Sprintf("Vitess Tester (%v)", test), - RunsOn: defaultRunnerName, - GoPrivate: goPrivate, - Path: testPath, + Name: fmt.Sprintf("Vitess Tester (%v)", test), + RunsOn: defaultRunnerName, + GoPrivate: goPrivate, + GoJunitReportSHA: goJunitReportSHA, + Path: testPath, } templateFileName := tpl @@ -279,11 +312,12 @@ func generateClusterWorkflows(list []string, tpl string) { for _, cluster := range clusters { for _, mysqlVersion := range clusterMySQLVersions() { test := &clusterTest{ - Name: fmt.Sprintf("Cluster (%s)", cluster), - Shard: cluster, - BuildTag: buildTag[cluster], - RunsOn: defaultRunnerName, - GoPrivate: goPrivate, + Name: fmt.Sprintf("Cluster (%s)", cluster), + Shard: cluster, + BuildTag: buildTag[cluster], + RunsOn: defaultRunnerName, + GoPrivate: goPrivate, + GoJunitReportSHA: goJunitReportSHA, } cores16Clusters := canonnizeList(clusterRequiring16CoresMachines) for _, cores16Cluster := range cores16Clusters { @@ -359,11 +393,12 @@ func generateUnitTestWorkflows() { for _, platform := range unitTestDatabases { for _, evalengine := range []string{"1", "0"} { test := &unitTest{ - Name: fmt.Sprintf("Unit Test (%s%s)", evalengineToString(evalengine), platform), - RunsOn: defaultRunnerName, - Platform: string(platform), - GoPrivate: goPrivate, - 
Evalengine: evalengine, + Name: fmt.Sprintf("Unit Test (%s%s)", evalengineToString(evalengine), platform), + RunsOn: defaultRunnerName, + Platform: string(platform), + GoPrivate: goPrivate, + GoJunitReportSHA: goJunitReportSHA, + Evalengine: evalengine, } test.FileName = fmt.Sprintf("unit_test_%s%s.yml", evalengineToString(evalengine), platform) path := fmt.Sprintf("%s/%s", workflowConfigDir, test.FileName) diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl index 890d85366e4..ede66093ecb 100644 --- a/test/templates/cluster_endtoend_test.tpl +++ b/test/templates/cluster_endtoend_test.tpl @@ -141,7 +141,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@{{.GoJunitReportSHA}} {{if .NeedsMinio }} - name: Install Minio diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl index 13a32866b3d..6df85fdd2c9 100644 --- a/test/templates/cluster_endtoend_test_mysql57.tpl +++ b/test/templates/cluster_endtoend_test_mysql57.tpl @@ -116,7 +116,7 @@ jobs: sudo service etcd stop # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@{{.GoJunitReportSHA}} {{if .InstallXtraBackup}} diff --git a/test/templates/cluster_vitess_tester.tpl b/test/templates/cluster_vitess_tester.tpl index 194dcbe47e0..152949b9b7e 100644 --- a/test/templates/cluster_vitess_tester.tpl +++ b/test/templates/cluster_vitess_tester.tpl @@ -98,7 +98,7 @@ jobs: go mod download # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@{{.GoJunitReportSHA}} # install vitess tester go install github.com/vitessio/vt/go/vt@e43009309f599378504905d4b804460f47822ac5 diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl index 
1a5d518208a..b87289e2946 100644 --- a/test/templates/unit_test.tpl +++ b/test/templates/unit_test.tpl @@ -105,7 +105,7 @@ jobs: go install golang.org/x/tools/cmd/goimports@latest # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD + go install github.com/vitessio/go-junit-report@{{.GoJunitReportSHA}} - name: Run make tools if: steps.changes.outputs.unit_tests == 'true' diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index dcaf7ed30f6..339e08d229d 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -53,7 +53,6 @@ "i": "^0.3.7", "jsdom": "^21.1.1", "msw": "^2.5.2", - "npm": "^10.9.2", "postcss": "^8.4.31", "prettier": "^2.2.1", "protobufjs-cli": "^1.1.3", @@ -3868,6 +3867,26 @@ "@parcel/watcher-win32-x64": "2.5.0" } }, + "node_modules/@parcel/watcher-android-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.0.tgz", + "integrity": "sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, "node_modules/@parcel/watcher-darwin-arm64": { "version": "2.5.0", "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz", @@ -3888,6 +3907,226 @@ "url": "https://opencollective.com/parcel" } }, + "node_modules/@parcel/watcher-darwin-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.0.tgz", + "integrity": "sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" 
+ }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-freebsd-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.0.tgz", + "integrity": "sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.0.tgz", + "integrity": "sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.0.tgz", + "integrity": "sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.0.tgz", + "integrity": "sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA==", + "cpu": [ + "arm64" 
+ ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.0.tgz", + "integrity": "sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.0.tgz", + "integrity": "sha512-d9AOkusyXARkFD66S6zlGXyzx5RvY+chTP9Jp0ypSTC9d4lzyRs9ovGf/80VCxjKddcUvnsGwCHWuF2EoPgWjw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.0.tgz", + "integrity": "sha512-iqOC+GoTDoFyk/VYSFHwjHhYrk8bljW6zOhPuhi5t9ulqiYq1togGJB5e3PwYVFFfeVgc6pbz3JdQyDoBszVaA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.0.tgz", + "integrity": 
"sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-ia32": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.0.tgz", + "integrity": "sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.0.tgz", + "integrity": "sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -10960,9 +11199,9 @@ "license": "MIT" }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, 
"license": "MIT", "dependencies": { @@ -11919,167 +12158,6 @@ "node": ">=0.10.0" } }, - "node_modules/npm": { - "version": "10.9.2", - "resolved": "https://registry.npmjs.org/npm/-/npm-10.9.2.tgz", - "integrity": "sha512-iriPEPIkoMYUy3F6f3wwSZAU93E0Eg6cHwIR6jzzOXWSy+SD/rOODEs74cVONHKSx2obXtuUoyidVEhISrisgQ==", - "bundleDependencies": [ - "@isaacs/string-locale-compare", - "@npmcli/arborist", - "@npmcli/config", - "@npmcli/fs", - "@npmcli/map-workspaces", - "@npmcli/package-json", - "@npmcli/promise-spawn", - "@npmcli/redact", - "@npmcli/run-script", - "@sigstore/tuf", - "abbrev", - "archy", - "cacache", - "chalk", - "ci-info", - "cli-columns", - "fastest-levenshtein", - "fs-minipass", - "glob", - "graceful-fs", - "hosted-git-info", - "ini", - "init-package-json", - "is-cidr", - "json-parse-even-better-errors", - "libnpmaccess", - "libnpmdiff", - "libnpmexec", - "libnpmfund", - "libnpmhook", - "libnpmorg", - "libnpmpack", - "libnpmpublish", - "libnpmsearch", - "libnpmteam", - "libnpmversion", - "make-fetch-happen", - "minimatch", - "minipass", - "minipass-pipeline", - "ms", - "node-gyp", - "nopt", - "normalize-package-data", - "npm-audit-report", - "npm-install-checks", - "npm-package-arg", - "npm-pick-manifest", - "npm-profile", - "npm-registry-fetch", - "npm-user-validate", - "p-map", - "pacote", - "parse-conflict-json", - "proc-log", - "qrcode-terminal", - "read", - "semver", - "spdx-expression-parse", - "ssri", - "supports-color", - "tar", - "text-table", - "tiny-relative-date", - "treeverse", - "validate-npm-package-name", - "which", - "write-file-atomic" - ], - "dev": true, - "license": "Artistic-2.0", - "workspaces": [ - "docs", - "smoke-tests", - "mock-globals", - "mock-registry", - "workspaces/*" - ], - "dependencies": { - "@isaacs/string-locale-compare": "^1.1.0", - "@npmcli/arborist": "^8.0.0", - "@npmcli/config": "^9.0.0", - "@npmcli/fs": "^4.0.0", - "@npmcli/map-workspaces": "^4.0.2", - "@npmcli/package-json": "^6.1.0", - "@npmcli/promise-spawn": 
"^8.0.2", - "@npmcli/redact": "^3.0.0", - "@npmcli/run-script": "^9.0.1", - "@sigstore/tuf": "^3.0.0", - "abbrev": "^3.0.0", - "archy": "~1.0.0", - "cacache": "^19.0.1", - "chalk": "^5.3.0", - "ci-info": "^4.1.0", - "cli-columns": "^4.0.0", - "fastest-levenshtein": "^1.0.16", - "fs-minipass": "^3.0.3", - "glob": "^10.4.5", - "graceful-fs": "^4.2.11", - "hosted-git-info": "^8.0.2", - "ini": "^5.0.0", - "init-package-json": "^7.0.2", - "is-cidr": "^5.1.0", - "json-parse-even-better-errors": "^4.0.0", - "libnpmaccess": "^9.0.0", - "libnpmdiff": "^7.0.0", - "libnpmexec": "^9.0.0", - "libnpmfund": "^6.0.0", - "libnpmhook": "^11.0.0", - "libnpmorg": "^7.0.0", - "libnpmpack": "^8.0.0", - "libnpmpublish": "^10.0.1", - "libnpmsearch": "^8.0.0", - "libnpmteam": "^7.0.0", - "libnpmversion": "^7.0.0", - "make-fetch-happen": "^14.0.3", - "minimatch": "^9.0.5", - "minipass": "^7.1.1", - "minipass-pipeline": "^1.2.4", - "ms": "^2.1.2", - "node-gyp": "^11.0.0", - "nopt": "^8.0.0", - "normalize-package-data": "^7.0.0", - "npm-audit-report": "^6.0.0", - "npm-install-checks": "^7.1.1", - "npm-package-arg": "^12.0.0", - "npm-pick-manifest": "^10.0.0", - "npm-profile": "^11.0.1", - "npm-registry-fetch": "^18.0.2", - "npm-user-validate": "^3.0.0", - "p-map": "^4.0.0", - "pacote": "^19.0.1", - "parse-conflict-json": "^4.0.0", - "proc-log": "^5.0.0", - "qrcode-terminal": "^0.12.0", - "read": "^4.0.0", - "semver": "^7.6.3", - "spdx-expression-parse": "^4.0.0", - "ssri": "^12.0.0", - "supports-color": "^9.4.0", - "tar": "^6.2.1", - "text-table": "~0.2.0", - "tiny-relative-date": "^1.3.0", - "treeverse": "^3.0.0", - "validate-npm-package-name": "^6.0.0", - "which": "^5.0.0", - "write-file-atomic": "^6.0.0" - }, - "bin": { - "npm": "bin/npm-cli.js", - "npx": "bin/npx-cli.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, "node_modules/npm-run-path": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", @@ -12093,2606 +12171,6 @@ 
"node": ">=8" } }, - "node_modules/npm/node_modules/@isaacs/cliui": { - "version": "8.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex": { - "version": "6.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/npm/node_modules/@isaacs/cliui/node_modules/emoji-regex": { - "version": "9.2.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/@isaacs/cliui/node_modules/string-width": { - "version": "5.1.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm/node_modules/@isaacs/cliui/node_modules/strip-ansi": { - "version": "7.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/npm/node_modules/@isaacs/fs-minipass": { - "version": "4.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minipass": "^7.0.4" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/npm/node_modules/@isaacs/string-locale-compare": { - "version": "1.1.0", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/@npmcli/agent": { - "version": "3.0.0", - 
"dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/arborist": { - "version": "8.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@isaacs/string-locale-compare": "^1.1.0", - "@npmcli/fs": "^4.0.0", - "@npmcli/installed-package-contents": "^3.0.0", - "@npmcli/map-workspaces": "^4.0.1", - "@npmcli/metavuln-calculator": "^8.0.0", - "@npmcli/name-from-folder": "^3.0.0", - "@npmcli/node-gyp": "^4.0.0", - "@npmcli/package-json": "^6.0.1", - "@npmcli/query": "^4.0.0", - "@npmcli/redact": "^3.0.0", - "@npmcli/run-script": "^9.0.1", - "bin-links": "^5.0.0", - "cacache": "^19.0.1", - "common-ancestor-path": "^1.0.1", - "hosted-git-info": "^8.0.0", - "json-parse-even-better-errors": "^4.0.0", - "json-stringify-nice": "^1.1.4", - "lru-cache": "^10.2.2", - "minimatch": "^9.0.4", - "nopt": "^8.0.0", - "npm-install-checks": "^7.1.0", - "npm-package-arg": "^12.0.0", - "npm-pick-manifest": "^10.0.0", - "npm-registry-fetch": "^18.0.1", - "pacote": "^19.0.0", - "parse-conflict-json": "^4.0.0", - "proc-log": "^5.0.0", - "proggy": "^3.0.0", - "promise-all-reject-late": "^1.0.0", - "promise-call-limit": "^3.0.1", - "read-package-json-fast": "^4.0.0", - "semver": "^7.3.7", - "ssri": "^12.0.0", - "treeverse": "^3.0.0", - "walk-up-path": "^3.0.1" - }, - "bin": { - "arborist": "bin/index.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/config": { - "version": "9.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/map-workspaces": "^4.0.1", - "@npmcli/package-json": "^6.0.1", - "ci-info": "^4.0.0", - "ini": "^5.0.0", - "nopt": "^8.0.0", - "proc-log": "^5.0.0", - "semver": "^7.3.5", - 
"walk-up-path": "^3.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/fs": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/git": { - "version": "6.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/promise-spawn": "^8.0.0", - "ini": "^5.0.0", - "lru-cache": "^10.0.1", - "npm-pick-manifest": "^10.0.0", - "proc-log": "^5.0.0", - "promise-inflight": "^1.0.1", - "promise-retry": "^2.0.1", - "semver": "^7.3.5", - "which": "^5.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/installed-package-contents": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "npm-bundled": "^4.0.0", - "npm-normalize-package-bin": "^4.0.0" - }, - "bin": { - "installed-package-contents": "bin/index.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/map-workspaces": { - "version": "4.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/name-from-folder": "^3.0.0", - "@npmcli/package-json": "^6.0.0", - "glob": "^10.2.2", - "minimatch": "^9.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/metavuln-calculator": { - "version": "8.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "cacache": "^19.0.0", - "json-parse-even-better-errors": "^4.0.0", - "pacote": "^20.0.0", - "proc-log": "^5.0.0", - "semver": "^7.3.5" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote": { - "version": "20.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - 
"dependencies": { - "@npmcli/git": "^6.0.0", - "@npmcli/installed-package-contents": "^3.0.0", - "@npmcli/package-json": "^6.0.0", - "@npmcli/promise-spawn": "^8.0.0", - "@npmcli/run-script": "^9.0.0", - "cacache": "^19.0.0", - "fs-minipass": "^3.0.0", - "minipass": "^7.0.2", - "npm-package-arg": "^12.0.0", - "npm-packlist": "^9.0.0", - "npm-pick-manifest": "^10.0.0", - "npm-registry-fetch": "^18.0.0", - "proc-log": "^5.0.0", - "promise-retry": "^2.0.1", - "sigstore": "^3.0.0", - "ssri": "^12.0.0", - "tar": "^6.1.11" - }, - "bin": { - "pacote": "bin/index.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/name-from-folder": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/node-gyp": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/package-json": { - "version": "6.1.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/git": "^6.0.0", - "glob": "^10.2.2", - "hosted-git-info": "^8.0.0", - "json-parse-even-better-errors": "^4.0.0", - "normalize-package-data": "^7.0.0", - "proc-log": "^5.0.0", - "semver": "^7.5.3" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/promise-spawn": { - "version": "8.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "which": "^5.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/query": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "postcss-selector-parser": "^6.1.2" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/redact": { - "version": "3.0.0", - 
"dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@npmcli/run-script": { - "version": "9.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/node-gyp": "^4.0.0", - "@npmcli/package-json": "^6.0.0", - "@npmcli/promise-spawn": "^8.0.0", - "node-gyp": "^11.0.0", - "proc-log": "^5.0.0", - "which": "^5.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "engines": { - "node": ">=14" - } - }, - "node_modules/npm/node_modules/@sigstore/protobuf-specs": { - "version": "0.3.2", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "engines": { - "node": "^16.14.0 || >=18.0.0" - } - }, - "node_modules/npm/node_modules/@sigstore/tuf": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "@sigstore/protobuf-specs": "^0.3.2", - "tuf-js": "^3.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/@tufjs/canonical-json": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": "^16.14.0 || >=18.0.0" - } - }, - "node_modules/npm/node_modules/abbrev": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/agent-base": { - "version": "7.1.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "debug": "^4.3.4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/npm/node_modules/aggregate-error": { - "version": "3.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - 
} - }, - "node_modules/npm/node_modules/ansi-regex": { - "version": "5.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/ansi-styles": { - "version": "6.2.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/npm/node_modules/aproba": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/archy": { - "version": "1.0.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/balanced-match": { - "version": "1.0.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/bin-links": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "cmd-shim": "^7.0.0", - "npm-normalize-package-bin": "^4.0.0", - "proc-log": "^5.0.0", - "read-cmd-shim": "^5.0.0", - "write-file-atomic": "^6.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/binary-extensions": { - "version": "2.3.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm/node_modules/brace-expansion": { - "version": "2.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/npm/node_modules/cacache": { - "version": "19.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^4.0.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^7.0.2", - "ssri": "^12.0.0", - "tar": 
"^7.4.3", - "unique-filename": "^4.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/cacache/node_modules/chownr": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/npm/node_modules/cacache/node_modules/minizlib": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "minipass": "^7.0.4", - "rimraf": "^5.0.5" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/npm/node_modules/cacache/node_modules/mkdirp": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "bin": { - "mkdirp": "dist/cjs/src/bin.js" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/cacache/node_modules/p-map": { - "version": "7.0.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm/node_modules/cacache/node_modules/tar": { - "version": "7.4.3", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.0.1", - "mkdirp": "^3.0.1", - "yallist": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/npm/node_modules/cacache/node_modules/yallist": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/npm/node_modules/chalk": { - "version": "5.3.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/npm/node_modules/chownr": { - "version": "2.0.0", - "dev": true, - 
"inBundle": true, - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/ci-info": { - "version": "4.1.0", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/cidr-regex": { - "version": "4.1.1", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause", - "dependencies": { - "ip-regex": "^5.0.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/npm/node_modules/clean-stack": { - "version": "2.2.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/npm/node_modules/cli-columns": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/npm/node_modules/cmd-shim": { - "version": "7.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/color-convert": { - "version": "2.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/npm/node_modules/color-name": { - "version": "1.1.4", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/common-ancestor-path": { - "version": "1.0.1", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/cross-spawn": { - "version": "7.0.6", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/npm/node_modules/cross-spawn/node_modules/which": { - "version": "2.0.2", 
- "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/npm/node_modules/cssesc": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm/node_modules/debug": { - "version": "4.3.7", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/npm/node_modules/diff": { - "version": "5.2.0", - "dev": true, - "inBundle": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.3.1" - } - }, - "node_modules/npm/node_modules/eastasianwidth": { - "version": "0.2.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/emoji-regex": { - "version": "8.0.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/encoding": { - "version": "0.1.13", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "iconv-lite": "^0.6.2" - } - }, - "node_modules/npm/node_modules/env-paths": { - "version": "2.2.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/npm/node_modules/err-code": { - "version": "2.0.3", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/exponential-backoff": { - "version": "3.1.1", - "dev": true, - "inBundle": true, - "license": "Apache-2.0" - }, - "node_modules/npm/node_modules/fastest-levenshtein": { - "version": "1.0.16", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">= 4.9.1" - } - }, - "node_modules/npm/node_modules/foreground-child": { - "version": "3.3.0", 
- "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/fs-minipass": { - "version": "3.0.3", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/npm/node_modules/glob": { - "version": "10.4.5", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/graceful-fs": { - "version": "4.2.11", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/hosted-git-info": { - "version": "8.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^10.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/http-cache-semantics": { - "version": "4.1.1", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause" - }, - "node_modules/npm/node_modules/http-proxy-agent": { - "version": "7.0.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "agent-base": "^7.1.0", - "debug": "^4.3.4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/npm/node_modules/https-proxy-agent": { - "version": "7.0.5", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "agent-base": "^7.0.2", - "debug": "4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/npm/node_modules/iconv-lite": { - "version": "0.6.3", - "dev": true, 
- "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/npm/node_modules/ignore-walk": { - "version": "7.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minimatch": "^9.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/imurmurhash": { - "version": "0.1.4", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/npm/node_modules/indent-string": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/ini": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/init-package-json": { - "version": "7.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/package-json": "^6.0.0", - "npm-package-arg": "^12.0.0", - "promzard": "^2.0.0", - "read": "^4.0.0", - "semver": "^7.3.5", - "validate-npm-package-license": "^3.0.4", - "validate-npm-package-name": "^6.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/ip-address": { - "version": "9.0.5", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "jsbn": "1.1.0", - "sprintf-js": "^1.1.3" - }, - "engines": { - "node": ">= 12" - } - }, - "node_modules/npm/node_modules/ip-regex": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm/node_modules/is-cidr": { - "version": "5.1.0", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause", - "dependencies": { - 
"cidr-regex": "^4.1.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/npm/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/isexe": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/jackspeak": { - "version": "3.4.3", - "dev": true, - "inBundle": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, - "node_modules/npm/node_modules/jsbn": { - "version": "1.1.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/json-parse-even-better-errors": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/json-stringify-nice": { - "version": "1.1.4", - "dev": true, - "inBundle": true, - "license": "ISC", - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/jsonparse": { - "version": "1.3.1", - "dev": true, - "engines": [ - "node >= 0.2.0" - ], - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/just-diff": { - "version": "6.0.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/just-diff-apply": { - "version": "5.5.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/libnpmaccess": { - "version": "9.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "npm-package-arg": "^12.0.0", - "npm-registry-fetch": "^18.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmdiff": { - "version": "7.0.0", - 
"dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/arborist": "^8.0.0", - "@npmcli/installed-package-contents": "^3.0.0", - "binary-extensions": "^2.3.0", - "diff": "^5.1.0", - "minimatch": "^9.0.4", - "npm-package-arg": "^12.0.0", - "pacote": "^19.0.0", - "tar": "^6.2.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmexec": { - "version": "9.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/arborist": "^8.0.0", - "@npmcli/run-script": "^9.0.1", - "ci-info": "^4.0.0", - "npm-package-arg": "^12.0.0", - "pacote": "^19.0.0", - "proc-log": "^5.0.0", - "read": "^4.0.0", - "read-package-json-fast": "^4.0.0", - "semver": "^7.3.7", - "walk-up-path": "^3.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmfund": { - "version": "6.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/arborist": "^8.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmhook": { - "version": "11.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "aproba": "^2.0.0", - "npm-registry-fetch": "^18.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmorg": { - "version": "7.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "aproba": "^2.0.0", - "npm-registry-fetch": "^18.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmpack": { - "version": "8.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/arborist": "^8.0.0", - "@npmcli/run-script": "^9.0.1", - "npm-package-arg": "^12.0.0", - "pacote": "^19.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmpublish": { - 
"version": "10.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "ci-info": "^4.0.0", - "normalize-package-data": "^7.0.0", - "npm-package-arg": "^12.0.0", - "npm-registry-fetch": "^18.0.1", - "proc-log": "^5.0.0", - "semver": "^7.3.7", - "sigstore": "^3.0.0", - "ssri": "^12.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmsearch": { - "version": "8.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "npm-registry-fetch": "^18.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmteam": { - "version": "7.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "aproba": "^2.0.0", - "npm-registry-fetch": "^18.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/libnpmversion": { - "version": "7.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/git": "^6.0.1", - "@npmcli/run-script": "^9.0.1", - "json-parse-even-better-errors": "^4.0.0", - "proc-log": "^5.0.0", - "semver": "^7.3.7" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/lru-cache": { - "version": "10.4.3", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/make-fetch-happen": { - "version": "14.0.3", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/agent": "^3.0.0", - "cacache": "^19.0.1", - "http-cache-semantics": "^4.1.1", - "minipass": "^7.0.2", - "minipass-fetch": "^4.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^1.0.0", - "proc-log": "^5.0.0", - "promise-retry": "^2.0.1", - "ssri": "^12.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/make-fetch-happen/node_modules/negotiator": { - "version": "1.0.0", - 
"dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/npm/node_modules/minimatch": { - "version": "9.0.5", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/minipass": { - "version": "7.1.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/npm/node_modules/minipass-collect": { - "version": "2.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/npm/node_modules/minipass-fetch": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "minipass": "^7.0.3", - "minipass-sized": "^1.0.3", - "minizlib": "^3.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - }, - "optionalDependencies": { - "encoding": "^0.1.13" - } - }, - "node_modules/npm/node_modules/minipass-fetch/node_modules/minizlib": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "minipass": "^7.0.4", - "rimraf": "^5.0.5" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/npm/node_modules/minipass-flush": { - "version": "1.0.5", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/npm/node_modules/minipass-flush/node_modules/minipass": { - "version": "3.3.6", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/minipass-pipeline": { - "version": "1.2.4", - "dev": true, - "inBundle": true, - "license": 
"ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/minipass-pipeline/node_modules/minipass": { - "version": "3.3.6", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/minipass-sized": { - "version": "1.0.3", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/minipass-sized/node_modules/minipass": { - "version": "3.3.6", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/minizlib": { - "version": "2.1.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/npm/node_modules/minizlib/node_modules/minipass": { - "version": "3.3.6", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/mkdirp": { - "version": "1.0.4", - "dev": true, - "inBundle": true, - "license": "MIT", - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/ms": { - "version": "2.1.3", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/mute-stream": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/node-gyp": { - "version": "11.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "env-paths": "^2.2.0", - "exponential-backoff": "^3.1.1", - "glob": "^10.3.10", - 
"graceful-fs": "^4.2.6", - "make-fetch-happen": "^14.0.3", - "nopt": "^8.0.0", - "proc-log": "^5.0.0", - "semver": "^7.3.5", - "tar": "^7.4.3", - "which": "^5.0.0" - }, - "bin": { - "node-gyp": "bin/node-gyp.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/node-gyp/node_modules/chownr": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/npm/node_modules/node-gyp/node_modules/minizlib": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "minipass": "^7.0.4", - "rimraf": "^5.0.5" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/npm/node_modules/node-gyp/node_modules/mkdirp": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "bin": { - "mkdirp": "dist/cjs/src/bin.js" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/node-gyp/node_modules/tar": { - "version": "7.4.3", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.0.1", - "mkdirp": "^3.0.1", - "yallist": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/npm/node_modules/node-gyp/node_modules/yallist": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/npm/node_modules/nopt": { - "version": "8.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "abbrev": "^2.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/nopt/node_modules/abbrev": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - 
"node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/npm/node_modules/normalize-package-data": { - "version": "7.0.0", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause", - "dependencies": { - "hosted-git-info": "^8.0.0", - "semver": "^7.3.5", - "validate-npm-package-license": "^3.0.4" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-audit-report": { - "version": "6.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-bundled": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "npm-normalize-package-bin": "^4.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-install-checks": { - "version": "7.1.1", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause", - "dependencies": { - "semver": "^7.1.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-normalize-package-bin": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-package-arg": { - "version": "12.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "hosted-git-info": "^8.0.0", - "proc-log": "^5.0.0", - "semver": "^7.3.5", - "validate-npm-package-name": "^6.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-packlist": { - "version": "9.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "ignore-walk": "^7.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-pick-manifest": { - "version": "10.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "npm-install-checks": 
"^7.1.0", - "npm-normalize-package-bin": "^4.0.0", - "npm-package-arg": "^12.0.0", - "semver": "^7.3.5" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-profile": { - "version": "11.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "npm-registry-fetch": "^18.0.0", - "proc-log": "^5.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-registry-fetch": { - "version": "18.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/redact": "^3.0.0", - "jsonparse": "^1.3.1", - "make-fetch-happen": "^14.0.0", - "minipass": "^7.0.2", - "minipass-fetch": "^4.0.0", - "minizlib": "^3.0.1", - "npm-package-arg": "^12.0.0", - "proc-log": "^5.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/npm-registry-fetch/node_modules/minizlib": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "minipass": "^7.0.4", - "rimraf": "^5.0.5" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/npm/node_modules/npm-user-validate": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/p-map": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm/node_modules/package-json-from-dist": { - "version": "1.0.1", - "dev": true, - "inBundle": true, - "license": "BlueOak-1.0.0" - }, - "node_modules/npm/node_modules/pacote": { - "version": "19.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "@npmcli/git": "^6.0.0", - "@npmcli/installed-package-contents": "^3.0.0", - 
"@npmcli/package-json": "^6.0.0", - "@npmcli/promise-spawn": "^8.0.0", - "@npmcli/run-script": "^9.0.0", - "cacache": "^19.0.0", - "fs-minipass": "^3.0.0", - "minipass": "^7.0.2", - "npm-package-arg": "^12.0.0", - "npm-packlist": "^9.0.0", - "npm-pick-manifest": "^10.0.0", - "npm-registry-fetch": "^18.0.0", - "proc-log": "^5.0.0", - "promise-retry": "^2.0.1", - "sigstore": "^3.0.0", - "ssri": "^12.0.0", - "tar": "^6.1.11" - }, - "bin": { - "pacote": "bin/index.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/parse-conflict-json": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "json-parse-even-better-errors": "^4.0.0", - "just-diff": "^6.0.0", - "just-diff-apply": "^5.2.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/path-key": { - "version": "3.1.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/path-scurry": { - "version": "1.11.1", - "dev": true, - "inBundle": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/postcss-selector-parser": { - "version": "6.1.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm/node_modules/proc-log": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/proggy": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - 
"node_modules/npm/node_modules/promise-all-reject-late": { - "version": "1.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/promise-call-limit": { - "version": "3.0.2", - "dev": true, - "inBundle": true, - "license": "ISC", - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/promise-inflight": { - "version": "1.0.1", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/promise-retry": { - "version": "2.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "err-code": "^2.0.2", - "retry": "^0.12.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/promzard": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "read": "^4.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/qrcode-terminal": { - "version": "0.12.0", - "dev": true, - "inBundle": true, - "bin": { - "qrcode-terminal": "bin/qrcode-terminal.js" - } - }, - "node_modules/npm/node_modules/read": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "mute-stream": "^2.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/read-cmd-shim": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/read-package-json-fast": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "json-parse-even-better-errors": "^4.0.0", - "npm-normalize-package-bin": "^4.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/retry": { - "version": "0.12.0", - "dev": true, - "inBundle": 
true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/npm/node_modules/rimraf": { - "version": "5.0.10", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "glob": "^10.3.7" - }, - "bin": { - "rimraf": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/safer-buffer": { - "version": "2.1.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true - }, - "node_modules/npm/node_modules/semver": { - "version": "7.6.3", - "dev": true, - "inBundle": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/shebang-command": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/shebang-regex": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/signal-exit": { - "version": "4.1.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/npm/node_modules/sigstore": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "@sigstore/bundle": "^3.0.0", - "@sigstore/core": "^2.0.0", - "@sigstore/protobuf-specs": "^0.3.2", - "@sigstore/sign": "^3.0.0", - "@sigstore/tuf": "^3.0.0", - "@sigstore/verify": "^2.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/sigstore/node_modules/@sigstore/bundle": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "@sigstore/protobuf-specs": "^0.3.2" - }, - "engines": { - "node": "^18.17.0 || 
>=20.5.0" - } - }, - "node_modules/npm/node_modules/sigstore/node_modules/@sigstore/core": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/sigstore/node_modules/@sigstore/sign": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "@sigstore/bundle": "^3.0.0", - "@sigstore/core": "^2.0.0", - "@sigstore/protobuf-specs": "^0.3.2", - "make-fetch-happen": "^14.0.1", - "proc-log": "^5.0.0", - "promise-retry": "^2.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/sigstore/node_modules/@sigstore/verify": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "@sigstore/bundle": "^3.0.0", - "@sigstore/core": "^2.0.0", - "@sigstore/protobuf-specs": "^0.3.2" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/smart-buffer": { - "version": "4.2.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">= 6.0.0", - "npm": ">= 3.0.0" - } - }, - "node_modules/npm/node_modules/socks": { - "version": "2.8.3", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ip-address": "^9.0.5", - "smart-buffer": "^4.2.0" - }, - "engines": { - "node": ">= 10.0.0", - "npm": ">= 3.0.0" - } - }, - "node_modules/npm/node_modules/socks-proxy-agent": { - "version": "8.0.4", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "agent-base": "^7.1.1", - "debug": "^4.3.4", - "socks": "^2.8.3" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/npm/node_modules/spdx-correct": { - "version": "3.2.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - 
"node_modules/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/npm/node_modules/spdx-exceptions": { - "version": "2.5.0", - "dev": true, - "inBundle": true, - "license": "CC-BY-3.0" - }, - "node_modules/npm/node_modules/spdx-expression-parse": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/npm/node_modules/spdx-license-ids": { - "version": "3.0.20", - "dev": true, - "inBundle": true, - "license": "CC0-1.0" - }, - "node_modules/npm/node_modules/sprintf-js": { - "version": "1.1.3", - "dev": true, - "inBundle": true, - "license": "BSD-3-Clause" - }, - "node_modules/npm/node_modules/ssri": { - "version": "12.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/string-width": { - "version": "4.2.3", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/strip-ansi": { - "version": "6.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/strip-ansi-cjs": { - "name": 
"strip-ansi", - "version": "6.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/supports-color": { - "version": "9.4.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/npm/node_modules/tar": { - "version": "6.2.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/tar/node_modules/fs-minipass": { - "version": "2.1.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/npm/node_modules/tar/node_modules/fs-minipass/node_modules/minipass": { - "version": "3.3.6", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/tar/node_modules/minipass": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/text-table": { - "version": "0.2.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/tiny-relative-date": { - "version": "1.3.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/treeverse": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/npm/node_modules/tuf-js": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": 
"MIT", - "dependencies": { - "@tufjs/models": "3.0.1", - "debug": "^4.3.6", - "make-fetch-happen": "^14.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/tuf-js/node_modules/@tufjs/models": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "@tufjs/canonical-json": "2.0.0", - "minimatch": "^9.0.5" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/unique-filename": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "unique-slug": "^5.0.0" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/unique-slug": { - "version": "5.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/util-deprecate": { - "version": "1.0.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/validate-npm-package-license": { - "version": "3.0.4", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "node_modules/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/npm/node_modules/validate-npm-package-name": { - "version": "6.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/walk-up-path": { - "version": "3.0.1", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/which": { - "version": "5.0.0", - "dev": true, - "inBundle": 
true, - "license": "ISC", - "dependencies": { - "isexe": "^3.1.1" - }, - "bin": { - "node-which": "bin/which.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/which/node_modules/isexe": { - "version": "3.1.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": ">=16" - } - }, - "node_modules/npm/node_modules/wrap-ansi": { - "version": "8.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/npm/node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/npm/node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { - "version": "4.3.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/npm/node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "6.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/npm/node_modules/wrap-ansi/node_modules/emoji-regex": { - "version": "9.2.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/wrap-ansi/node_modules/string-width": { - "version": "5.1.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - 
"eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm/node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "7.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/npm/node_modules/write-file-atomic": { - "version": "6.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/npm/node_modules/yallist": { - "version": "4.0.0", - "dev": true, - "inBundle": true, - "license": "ISC" - }, "node_modules/nth-check": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", @@ -18233,9 +15711,9 @@ } }, "node_modules/sucrase/node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", "dev": true, "license": "ISC", "dependencies": { diff --git a/web/vtadmin/package.json b/web/vtadmin/package.json index 0b8be570f18..db6b0bec4dc 100644 --- a/web/vtadmin/package.json +++ b/web/vtadmin/package.json @@ -90,7 +90,6 @@ "i": "^0.3.7", "jsdom": "^21.1.1", "msw": "^2.5.2", - "npm": "^10.9.2", "postcss": "^8.4.31", "prettier": "^2.2.1", "protobufjs-cli": "^1.1.3", diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 
c14d702564d..04683c89cb9 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -18943,6 +18943,9 @@ export namespace topodata { /** Keyspace vtorc_state */ vtorc_state?: (vtorcdata.IKeyspace|null); + + /** Keyspace query_throttler_config */ + query_throttler_config?: (querythrottler.IConfig|null); } /** Represents a Keyspace. */ @@ -18975,6 +18978,9 @@ export namespace topodata { /** Keyspace vtorc_state. */ public vtorc_state?: (vtorcdata.IKeyspace|null); + /** Keyspace query_throttler_config. */ + public query_throttler_config?: (querythrottler.IConfig|null); + /** * Creates a new Keyspace instance using the specified properties. * @param [properties] Properties to set @@ -19931,6 +19937,9 @@ export namespace topodata { /** SrvKeyspace throttler_config */ throttler_config?: (topodata.IThrottlerConfig|null); + + /** SrvKeyspace query_throttler_config */ + query_throttler_config?: (querythrottler.IConfig|null); } /** Represents a SrvKeyspace. */ @@ -19948,6 +19957,9 @@ export namespace topodata { /** SrvKeyspace throttler_config. */ public throttler_config?: (topodata.IThrottlerConfig|null); + /** SrvKeyspace query_throttler_config. */ + public query_throttler_config?: (querythrottler.IConfig|null); + /** * Creates a new SrvKeyspace instance using the specified properties. * @param [properties] Properties to set @@ -20840,6 +20852,622 @@ export namespace vtorcdata { } } +/** Namespace querythrottler. */ +export namespace querythrottler { + + /** ThrottlingStrategy enum. */ + enum ThrottlingStrategy { + UNKNOWN = 0, + TABLET_THROTTLER = 1 + } + + /** Properties of a Config. */ + interface IConfig { + + /** Config enabled */ + enabled?: (boolean|null); + + /** Config strategy */ + strategy?: (querythrottler.ThrottlingStrategy|null); + + /** Config tablet_strategy_config */ + tablet_strategy_config?: (querythrottler.ITabletStrategyConfig|null); + + /** Config dry_run */ + dry_run?: (boolean|null); + } + + /** Represents a Config. 
*/ + class Config implements IConfig { + + /** + * Constructs a new Config. + * @param [properties] Properties to set + */ + constructor(properties?: querythrottler.IConfig); + + /** Config enabled. */ + public enabled: boolean; + + /** Config strategy. */ + public strategy: querythrottler.ThrottlingStrategy; + + /** Config tablet_strategy_config. */ + public tablet_strategy_config?: (querythrottler.ITabletStrategyConfig|null); + + /** Config dry_run. */ + public dry_run: boolean; + + /** + * Creates a new Config instance using the specified properties. + * @param [properties] Properties to set + * @returns Config instance + */ + public static create(properties?: querythrottler.IConfig): querythrottler.Config; + + /** + * Encodes the specified Config message. Does not implicitly {@link querythrottler.Config.verify|verify} messages. + * @param message Config message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: querythrottler.IConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Config message, length delimited. Does not implicitly {@link querythrottler.Config.verify|verify} messages. + * @param message Config message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: querythrottler.IConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Config message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Config + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): querythrottler.Config; + + /** + * Decodes a Config message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns Config + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): querythrottler.Config; + + /** + * Verifies a Config message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Config message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Config + */ + public static fromObject(object: { [k: string]: any }): querythrottler.Config; + + /** + * Creates a plain object from a Config message. Also converts values to other types if specified. + * @param message Config + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: querythrottler.Config, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Config to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Config + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a TabletStrategyConfig. */ + interface ITabletStrategyConfig { + + /** TabletStrategyConfig tablet_rules */ + tablet_rules?: ({ [k: string]: querythrottler.IStatementRuleSet }|null); + } + + /** Represents a TabletStrategyConfig. */ + class TabletStrategyConfig implements ITabletStrategyConfig { + + /** + * Constructs a new TabletStrategyConfig. + * @param [properties] Properties to set + */ + constructor(properties?: querythrottler.ITabletStrategyConfig); + + /** TabletStrategyConfig tablet_rules. 
*/ + public tablet_rules: { [k: string]: querythrottler.IStatementRuleSet }; + + /** + * Creates a new TabletStrategyConfig instance using the specified properties. + * @param [properties] Properties to set + * @returns TabletStrategyConfig instance + */ + public static create(properties?: querythrottler.ITabletStrategyConfig): querythrottler.TabletStrategyConfig; + + /** + * Encodes the specified TabletStrategyConfig message. Does not implicitly {@link querythrottler.TabletStrategyConfig.verify|verify} messages. + * @param message TabletStrategyConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: querythrottler.ITabletStrategyConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TabletStrategyConfig message, length delimited. Does not implicitly {@link querythrottler.TabletStrategyConfig.verify|verify} messages. + * @param message TabletStrategyConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: querythrottler.ITabletStrategyConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TabletStrategyConfig message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TabletStrategyConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): querythrottler.TabletStrategyConfig; + + /** + * Decodes a TabletStrategyConfig message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns TabletStrategyConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): querythrottler.TabletStrategyConfig; + + /** + * Verifies a TabletStrategyConfig message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a TabletStrategyConfig message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TabletStrategyConfig + */ + public static fromObject(object: { [k: string]: any }): querythrottler.TabletStrategyConfig; + + /** + * Creates a plain object from a TabletStrategyConfig message. Also converts values to other types if specified. + * @param message TabletStrategyConfig + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: querythrottler.TabletStrategyConfig, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this TabletStrategyConfig to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for TabletStrategyConfig + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a StatementRuleSet. */ + interface IStatementRuleSet { + + /** StatementRuleSet statement_rules */ + statement_rules?: ({ [k: string]: querythrottler.IMetricRuleSet }|null); + } + + /** Represents a StatementRuleSet. */ + class StatementRuleSet implements IStatementRuleSet { + + /** + * Constructs a new StatementRuleSet. 
+ * @param [properties] Properties to set + */ + constructor(properties?: querythrottler.IStatementRuleSet); + + /** StatementRuleSet statement_rules. */ + public statement_rules: { [k: string]: querythrottler.IMetricRuleSet }; + + /** + * Creates a new StatementRuleSet instance using the specified properties. + * @param [properties] Properties to set + * @returns StatementRuleSet instance + */ + public static create(properties?: querythrottler.IStatementRuleSet): querythrottler.StatementRuleSet; + + /** + * Encodes the specified StatementRuleSet message. Does not implicitly {@link querythrottler.StatementRuleSet.verify|verify} messages. + * @param message StatementRuleSet message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: querythrottler.IStatementRuleSet, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified StatementRuleSet message, length delimited. Does not implicitly {@link querythrottler.StatementRuleSet.verify|verify} messages. + * @param message StatementRuleSet message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: querythrottler.IStatementRuleSet, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a StatementRuleSet message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns StatementRuleSet + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): querythrottler.StatementRuleSet; + + /** + * Decodes a StatementRuleSet message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns StatementRuleSet + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): querythrottler.StatementRuleSet; + + /** + * Verifies a StatementRuleSet message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a StatementRuleSet message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns StatementRuleSet + */ + public static fromObject(object: { [k: string]: any }): querythrottler.StatementRuleSet; + + /** + * Creates a plain object from a StatementRuleSet message. Also converts values to other types if specified. + * @param message StatementRuleSet + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: querythrottler.StatementRuleSet, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this StatementRuleSet to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for StatementRuleSet + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a MetricRuleSet. */ + interface IMetricRuleSet { + + /** MetricRuleSet metric_rules */ + metric_rules?: ({ [k: string]: querythrottler.IMetricRule }|null); + } + + /** Represents a MetricRuleSet. */ + class MetricRuleSet implements IMetricRuleSet { + + /** + * Constructs a new MetricRuleSet. 
+ * @param [properties] Properties to set + */ + constructor(properties?: querythrottler.IMetricRuleSet); + + /** MetricRuleSet metric_rules. */ + public metric_rules: { [k: string]: querythrottler.IMetricRule }; + + /** + * Creates a new MetricRuleSet instance using the specified properties. + * @param [properties] Properties to set + * @returns MetricRuleSet instance + */ + public static create(properties?: querythrottler.IMetricRuleSet): querythrottler.MetricRuleSet; + + /** + * Encodes the specified MetricRuleSet message. Does not implicitly {@link querythrottler.MetricRuleSet.verify|verify} messages. + * @param message MetricRuleSet message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: querythrottler.IMetricRuleSet, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified MetricRuleSet message, length delimited. Does not implicitly {@link querythrottler.MetricRuleSet.verify|verify} messages. + * @param message MetricRuleSet message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: querythrottler.IMetricRuleSet, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a MetricRuleSet message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns MetricRuleSet + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): querythrottler.MetricRuleSet; + + /** + * Decodes a MetricRuleSet message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns MetricRuleSet + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): querythrottler.MetricRuleSet; + + /** + * Verifies a MetricRuleSet message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a MetricRuleSet message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns MetricRuleSet + */ + public static fromObject(object: { [k: string]: any }): querythrottler.MetricRuleSet; + + /** + * Creates a plain object from a MetricRuleSet message. Also converts values to other types if specified. + * @param message MetricRuleSet + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: querythrottler.MetricRuleSet, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this MetricRuleSet to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for MetricRuleSet + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a MetricRule. */ + interface IMetricRule { + + /** MetricRule thresholds */ + thresholds?: (querythrottler.IThrottleThreshold[]|null); + } + + /** Represents a MetricRule. */ + class MetricRule implements IMetricRule { + + /** + * Constructs a new MetricRule. + * @param [properties] Properties to set + */ + constructor(properties?: querythrottler.IMetricRule); + + /** MetricRule thresholds. 
*/ + public thresholds: querythrottler.IThrottleThreshold[]; + + /** + * Creates a new MetricRule instance using the specified properties. + * @param [properties] Properties to set + * @returns MetricRule instance + */ + public static create(properties?: querythrottler.IMetricRule): querythrottler.MetricRule; + + /** + * Encodes the specified MetricRule message. Does not implicitly {@link querythrottler.MetricRule.verify|verify} messages. + * @param message MetricRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: querythrottler.IMetricRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified MetricRule message, length delimited. Does not implicitly {@link querythrottler.MetricRule.verify|verify} messages. + * @param message MetricRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: querythrottler.IMetricRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a MetricRule message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns MetricRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): querythrottler.MetricRule; + + /** + * Decodes a MetricRule message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns MetricRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): querythrottler.MetricRule; + + /** + * Verifies a MetricRule message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a MetricRule message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns MetricRule + */ + public static fromObject(object: { [k: string]: any }): querythrottler.MetricRule; + + /** + * Creates a plain object from a MetricRule message. Also converts values to other types if specified. + * @param message MetricRule + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: querythrottler.MetricRule, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this MetricRule to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for MetricRule + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ThrottleThreshold. */ + interface IThrottleThreshold { + + /** ThrottleThreshold above */ + above?: (number|null); + + /** ThrottleThreshold throttle */ + throttle?: (number|null); + } + + /** Represents a ThrottleThreshold. */ + class ThrottleThreshold implements IThrottleThreshold { + + /** + * Constructs a new ThrottleThreshold. + * @param [properties] Properties to set + */ + constructor(properties?: querythrottler.IThrottleThreshold); + + /** ThrottleThreshold above. */ + public above: number; + + /** ThrottleThreshold throttle. */ + public throttle: number; + + /** + * Creates a new ThrottleThreshold instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ThrottleThreshold instance + */ + public static create(properties?: querythrottler.IThrottleThreshold): querythrottler.ThrottleThreshold; + + /** + * Encodes the specified ThrottleThreshold message. Does not implicitly {@link querythrottler.ThrottleThreshold.verify|verify} messages. + * @param message ThrottleThreshold message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: querythrottler.IThrottleThreshold, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ThrottleThreshold message, length delimited. Does not implicitly {@link querythrottler.ThrottleThreshold.verify|verify} messages. + * @param message ThrottleThreshold message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: querythrottler.IThrottleThreshold, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ThrottleThreshold message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ThrottleThreshold + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): querythrottler.ThrottleThreshold; + + /** + * Decodes a ThrottleThreshold message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ThrottleThreshold + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): querythrottler.ThrottleThreshold; + + /** + * Verifies a ThrottleThreshold message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ThrottleThreshold message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ThrottleThreshold + */ + public static fromObject(object: { [k: string]: any }): querythrottler.ThrottleThreshold; + + /** + * Creates a plain object from a ThrottleThreshold message. Also converts values to other types if specified. + * @param message ThrottleThreshold + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: querythrottler.ThrottleThreshold, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ThrottleThreshold to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ThrottleThreshold + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } +} + /** Namespace vtrpc. */ export namespace vtrpc { @@ -30189,6 +30817,9 @@ export namespace tabletmanagerdata { /** Properties of a DemotePrimaryRequest. */ interface IDemotePrimaryRequest { + + /** DemotePrimaryRequest force */ + force?: (boolean|null); } /** Represents a DemotePrimaryRequest. */ @@ -30200,6 +30831,9 @@ export namespace tabletmanagerdata { */ constructor(properties?: tabletmanagerdata.IDemotePrimaryRequest); + /** DemotePrimaryRequest force. */ + public force: boolean; + /** * Creates a new DemotePrimaryRequest instance using the specified properties. 
* @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index 51d5dadbfbc..9e4503774e8 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -43496,6 +43496,7 @@ export const topodata = $root.topodata = (() => { * @property {topodata.IThrottlerConfig|null} [throttler_config] Keyspace throttler_config * @property {string|null} [sidecar_db_name] Keyspace sidecar_db_name * @property {vtorcdata.IKeyspace|null} [vtorc_state] Keyspace vtorc_state + * @property {querythrottler.IConfig|null} [query_throttler_config] Keyspace query_throttler_config */ /** @@ -43569,6 +43570,14 @@ export const topodata = $root.topodata = (() => { */ Keyspace.prototype.vtorc_state = null; + /** + * Keyspace query_throttler_config. + * @member {querythrottler.IConfig|null|undefined} query_throttler_config + * @memberof topodata.Keyspace + * @instance + */ + Keyspace.prototype.query_throttler_config = null; + /** * Creates a new Keyspace instance using the specified properties. 
* @function create @@ -43607,6 +43616,8 @@ export const topodata = $root.topodata = (() => { writer.uint32(/* id 10, wireType 2 =*/82).string(message.sidecar_db_name); if (message.vtorc_state != null && Object.hasOwnProperty.call(message, "vtorc_state")) $root.vtorcdata.Keyspace.encode(message.vtorc_state, writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + if (message.query_throttler_config != null && Object.hasOwnProperty.call(message, "query_throttler_config")) + $root.querythrottler.Config.encode(message.query_throttler_config, writer.uint32(/* id 20000, wireType 2 =*/160002).fork()).ldelim(); return writer; }; @@ -43669,6 +43680,10 @@ export const topodata = $root.topodata = (() => { message.vtorc_state = $root.vtorcdata.Keyspace.decode(reader, reader.uint32()); break; } + case 20000: { + message.query_throttler_config = $root.querythrottler.Config.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -43736,6 +43751,11 @@ export const topodata = $root.topodata = (() => { if (error) return "vtorc_state." + error; } + if (message.query_throttler_config != null && message.hasOwnProperty("query_throttler_config")) { + let error = $root.querythrottler.Config.verify(message.query_throttler_config); + if (error) + return "query_throttler_config." 
+ error; + } return null; }; @@ -43788,6 +43808,11 @@ export const topodata = $root.topodata = (() => { throw TypeError(".topodata.Keyspace.vtorc_state: object expected"); message.vtorc_state = $root.vtorcdata.Keyspace.fromObject(object.vtorc_state); } + if (object.query_throttler_config != null) { + if (typeof object.query_throttler_config !== "object") + throw TypeError(".topodata.Keyspace.query_throttler_config: object expected"); + message.query_throttler_config = $root.querythrottler.Config.fromObject(object.query_throttler_config); + } return message; }; @@ -43812,6 +43837,7 @@ export const topodata = $root.topodata = (() => { object.throttler_config = null; object.sidecar_db_name = ""; object.vtorc_state = null; + object.query_throttler_config = null; } if (message.keyspace_type != null && message.hasOwnProperty("keyspace_type")) object.keyspace_type = options.enums === String ? $root.topodata.KeyspaceType[message.keyspace_type] === undefined ? message.keyspace_type : $root.topodata.KeyspaceType[message.keyspace_type] : message.keyspace_type; @@ -43827,6 +43853,8 @@ export const topodata = $root.topodata = (() => { object.sidecar_db_name = message.sidecar_db_name; if (message.vtorc_state != null && message.hasOwnProperty("vtorc_state")) object.vtorc_state = $root.vtorcdata.Keyspace.toObject(message.vtorc_state, options); + if (message.query_throttler_config != null && message.hasOwnProperty("query_throttler_config")) + object.query_throttler_config = $root.querythrottler.Config.toObject(message.query_throttler_config, options); return object; }; @@ -46016,6 +46044,7 @@ export const topodata = $root.topodata = (() => { * @interface ISrvKeyspace * @property {Array.|null} [partitions] SrvKeyspace partitions * @property {topodata.IThrottlerConfig|null} [throttler_config] SrvKeyspace throttler_config + * @property {querythrottler.IConfig|null} [query_throttler_config] SrvKeyspace query_throttler_config */ /** @@ -46050,6 +46079,14 @@ export const topodata = 
$root.topodata = (() => { */ SrvKeyspace.prototype.throttler_config = null; + /** + * SrvKeyspace query_throttler_config. + * @member {querythrottler.IConfig|null|undefined} query_throttler_config + * @memberof topodata.SrvKeyspace + * @instance + */ + SrvKeyspace.prototype.query_throttler_config = null; + /** * Creates a new SrvKeyspace instance using the specified properties. * @function create @@ -46079,6 +46116,8 @@ export const topodata = $root.topodata = (() => { $root.topodata.SrvKeyspace.KeyspacePartition.encode(message.partitions[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.throttler_config != null && Object.hasOwnProperty.call(message, "throttler_config")) $root.topodata.ThrottlerConfig.encode(message.throttler_config, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.query_throttler_config != null && Object.hasOwnProperty.call(message, "query_throttler_config")) + $root.querythrottler.Config.encode(message.query_throttler_config, writer.uint32(/* id 20000, wireType 2 =*/160002).fork()).ldelim(); return writer; }; @@ -46123,6 +46162,10 @@ export const topodata = $root.topodata = (() => { message.throttler_config = $root.topodata.ThrottlerConfig.decode(reader, reader.uint32()); break; } + case 20000: { + message.query_throttler_config = $root.querythrottler.Config.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -46172,6 +46215,11 @@ export const topodata = $root.topodata = (() => { if (error) return "throttler_config." + error; } + if (message.query_throttler_config != null && message.hasOwnProperty("query_throttler_config")) { + let error = $root.querythrottler.Config.verify(message.query_throttler_config); + if (error) + return "query_throttler_config." 
+ error; + } return null; }; @@ -46202,6 +46250,11 @@ export const topodata = $root.topodata = (() => { throw TypeError(".topodata.SrvKeyspace.throttler_config: object expected"); message.throttler_config = $root.topodata.ThrottlerConfig.fromObject(object.throttler_config); } + if (object.query_throttler_config != null) { + if (typeof object.query_throttler_config !== "object") + throw TypeError(".topodata.SrvKeyspace.query_throttler_config: object expected"); + message.query_throttler_config = $root.querythrottler.Config.fromObject(object.query_throttler_config); + } return message; }; @@ -46220,8 +46273,10 @@ export const topodata = $root.topodata = (() => { let object = {}; if (options.arrays || options.defaults) object.partitions = []; - if (options.defaults) + if (options.defaults) { object.throttler_config = null; + object.query_throttler_config = null; + } if (message.partitions && message.partitions.length) { object.partitions = []; for (let j = 0; j < message.partitions.length; ++j) @@ -46229,6 +46284,8 @@ export const topodata = $root.topodata = (() => { } if (message.throttler_config != null && message.hasOwnProperty("throttler_config")) object.throttler_config = $root.topodata.ThrottlerConfig.toObject(message.throttler_config, options); + if (message.query_throttler_config != null && message.hasOwnProperty("query_throttler_config")) + object.query_throttler_config = $root.querythrottler.Config.toObject(message.query_throttler_config, options); return object; }; @@ -47631,151 +47688,1616 @@ export const topodata = $root.topodata = (() => { }; /** - * Decodes an ExternalClusters message from the specified reader or buffer, length delimited. + * Decodes an ExternalClusters message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof topodata.ExternalClusters + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {topodata.ExternalClusters} ExternalClusters + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExternalClusters.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExternalClusters message. + * @function verify + * @memberof topodata.ExternalClusters + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExternalClusters.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.vitess_cluster != null && message.hasOwnProperty("vitess_cluster")) { + if (!Array.isArray(message.vitess_cluster)) + return "vitess_cluster: array expected"; + for (let i = 0; i < message.vitess_cluster.length; ++i) { + let error = $root.topodata.ExternalVitessCluster.verify(message.vitess_cluster[i]); + if (error) + return "vitess_cluster." + error; + } + } + return null; + }; + + /** + * Creates an ExternalClusters message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof topodata.ExternalClusters + * @static + * @param {Object.} object Plain object + * @returns {topodata.ExternalClusters} ExternalClusters + */ + ExternalClusters.fromObject = function fromObject(object) { + if (object instanceof $root.topodata.ExternalClusters) + return object; + let message = new $root.topodata.ExternalClusters(); + if (object.vitess_cluster) { + if (!Array.isArray(object.vitess_cluster)) + throw TypeError(".topodata.ExternalClusters.vitess_cluster: array expected"); + message.vitess_cluster = []; + for (let i = 0; i < object.vitess_cluster.length; ++i) { + if (typeof object.vitess_cluster[i] !== "object") + throw TypeError(".topodata.ExternalClusters.vitess_cluster: object expected"); + message.vitess_cluster[i] = $root.topodata.ExternalVitessCluster.fromObject(object.vitess_cluster[i]); + } + } + return message; + }; + + /** + * Creates a plain object from an ExternalClusters message. Also converts values to other types if specified. + * @function toObject + * @memberof topodata.ExternalClusters + * @static + * @param {topodata.ExternalClusters} message ExternalClusters + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExternalClusters.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.vitess_cluster = []; + if (message.vitess_cluster && message.vitess_cluster.length) { + object.vitess_cluster = []; + for (let j = 0; j < message.vitess_cluster.length; ++j) + object.vitess_cluster[j] = $root.topodata.ExternalVitessCluster.toObject(message.vitess_cluster[j], options); + } + return object; + }; + + /** + * Converts this ExternalClusters to JSON. 
+ * @function toJSON + * @memberof topodata.ExternalClusters + * @instance + * @returns {Object.} JSON object + */ + ExternalClusters.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExternalClusters + * @function getTypeUrl + * @memberof topodata.ExternalClusters + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExternalClusters.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/topodata.ExternalClusters"; + }; + + return ExternalClusters; + })(); + + return topodata; +})(); + +export const vtorcdata = $root.vtorcdata = (() => { + + /** + * Namespace vtorcdata. + * @exports vtorcdata + * @namespace + */ + const vtorcdata = {}; + + vtorcdata.Keyspace = (function() { + + /** + * Properties of a Keyspace. + * @memberof vtorcdata + * @interface IKeyspace + * @property {boolean|null} [disable_emergency_reparent] Keyspace disable_emergency_reparent + */ + + /** + * Constructs a new Keyspace. + * @memberof vtorcdata + * @classdesc Represents a Keyspace. + * @implements IKeyspace + * @constructor + * @param {vtorcdata.IKeyspace=} [properties] Properties to set + */ + function Keyspace(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Keyspace disable_emergency_reparent. + * @member {boolean} disable_emergency_reparent + * @memberof vtorcdata.Keyspace + * @instance + */ + Keyspace.prototype.disable_emergency_reparent = false; + + /** + * Creates a new Keyspace instance using the specified properties. 
+ * @function create + * @memberof vtorcdata.Keyspace + * @static + * @param {vtorcdata.IKeyspace=} [properties] Properties to set + * @returns {vtorcdata.Keyspace} Keyspace instance + */ + Keyspace.create = function create(properties) { + return new Keyspace(properties); + }; + + /** + * Encodes the specified Keyspace message. Does not implicitly {@link vtorcdata.Keyspace.verify|verify} messages. + * @function encode + * @memberof vtorcdata.Keyspace + * @static + * @param {vtorcdata.IKeyspace} message Keyspace message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Keyspace.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.disable_emergency_reparent != null && Object.hasOwnProperty.call(message, "disable_emergency_reparent")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.disable_emergency_reparent); + return writer; + }; + + /** + * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vtorcdata.Keyspace.verify|verify} messages. + * @function encodeDelimited + * @memberof vtorcdata.Keyspace + * @static + * @param {vtorcdata.IKeyspace} message Keyspace message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Keyspace.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Keyspace message from the specified reader or buffer. 
+ * @function decode + * @memberof vtorcdata.Keyspace + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtorcdata.Keyspace} Keyspace + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Keyspace.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtorcdata.Keyspace(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.disable_emergency_reparent = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtorcdata.Keyspace + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtorcdata.Keyspace} Keyspace + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Keyspace.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Keyspace message. 
+ * @function verify + * @memberof vtorcdata.Keyspace + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Keyspace.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) + if (typeof message.disable_emergency_reparent !== "boolean") + return "disable_emergency_reparent: boolean expected"; + return null; + }; + + /** + * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtorcdata.Keyspace + * @static + * @param {Object.} object Plain object + * @returns {vtorcdata.Keyspace} Keyspace + */ + Keyspace.fromObject = function fromObject(object) { + if (object instanceof $root.vtorcdata.Keyspace) + return object; + let message = new $root.vtorcdata.Keyspace(); + if (object.disable_emergency_reparent != null) + message.disable_emergency_reparent = Boolean(object.disable_emergency_reparent); + return message; + }; + + /** + * Creates a plain object from a Keyspace message. Also converts values to other types if specified. + * @function toObject + * @memberof vtorcdata.Keyspace + * @static + * @param {vtorcdata.Keyspace} message Keyspace + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Keyspace.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.disable_emergency_reparent = false; + if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) + object.disable_emergency_reparent = message.disable_emergency_reparent; + return object; + }; + + /** + * Converts this Keyspace to JSON. 
+ * @function toJSON + * @memberof vtorcdata.Keyspace + * @instance + * @returns {Object.} JSON object + */ + Keyspace.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Keyspace + * @function getTypeUrl + * @memberof vtorcdata.Keyspace + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Keyspace.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtorcdata.Keyspace"; + }; + + return Keyspace; + })(); + + vtorcdata.Shard = (function() { + + /** + * Properties of a Shard. + * @memberof vtorcdata + * @interface IShard + * @property {boolean|null} [disable_emergency_reparent] Shard disable_emergency_reparent + */ + + /** + * Constructs a new Shard. + * @memberof vtorcdata + * @classdesc Represents a Shard. + * @implements IShard + * @constructor + * @param {vtorcdata.IShard=} [properties] Properties to set + */ + function Shard(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Shard disable_emergency_reparent. + * @member {boolean} disable_emergency_reparent + * @memberof vtorcdata.Shard + * @instance + */ + Shard.prototype.disable_emergency_reparent = false; + + /** + * Creates a new Shard instance using the specified properties. + * @function create + * @memberof vtorcdata.Shard + * @static + * @param {vtorcdata.IShard=} [properties] Properties to set + * @returns {vtorcdata.Shard} Shard instance + */ + Shard.create = function create(properties) { + return new Shard(properties); + }; + + /** + * Encodes the specified Shard message. Does not implicitly {@link vtorcdata.Shard.verify|verify} messages. 
+ * @function encode + * @memberof vtorcdata.Shard + * @static + * @param {vtorcdata.IShard} message Shard message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Shard.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.disable_emergency_reparent != null && Object.hasOwnProperty.call(message, "disable_emergency_reparent")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.disable_emergency_reparent); + return writer; + }; + + /** + * Encodes the specified Shard message, length delimited. Does not implicitly {@link vtorcdata.Shard.verify|verify} messages. + * @function encodeDelimited + * @memberof vtorcdata.Shard + * @static + * @param {vtorcdata.IShard} message Shard message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Shard.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Shard message from the specified reader or buffer. + * @function decode + * @memberof vtorcdata.Shard + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtorcdata.Shard} Shard + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Shard.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtorcdata.Shard(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.disable_emergency_reparent = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Shard message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtorcdata.Shard + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtorcdata.Shard} Shard + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Shard.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Shard message. + * @function verify + * @memberof vtorcdata.Shard + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Shard.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) + if (typeof message.disable_emergency_reparent !== "boolean") + return "disable_emergency_reparent: boolean expected"; + return null; + }; + + /** + * Creates a Shard message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtorcdata.Shard + * @static + * @param {Object.} object Plain object + * @returns {vtorcdata.Shard} Shard + */ + Shard.fromObject = function fromObject(object) { + if (object instanceof $root.vtorcdata.Shard) + return object; + let message = new $root.vtorcdata.Shard(); + if (object.disable_emergency_reparent != null) + message.disable_emergency_reparent = Boolean(object.disable_emergency_reparent); + return message; + }; + + /** + * Creates a plain object from a Shard message. Also converts values to other types if specified. + * @function toObject + * @memberof vtorcdata.Shard + * @static + * @param {vtorcdata.Shard} message Shard + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Shard.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.disable_emergency_reparent = false; + if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) + object.disable_emergency_reparent = message.disable_emergency_reparent; + return object; + }; + + /** + * Converts this Shard to JSON. 
+ * @function toJSON + * @memberof vtorcdata.Shard + * @instance + * @returns {Object.} JSON object + */ + Shard.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Shard + * @function getTypeUrl + * @memberof vtorcdata.Shard + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Shard.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtorcdata.Shard"; + }; + + return Shard; + })(); + + return vtorcdata; +})(); + +export const querythrottler = $root.querythrottler = (() => { + + /** + * Namespace querythrottler. + * @exports querythrottler + * @namespace + */ + const querythrottler = {}; + + /** + * ThrottlingStrategy enum. + * @name querythrottler.ThrottlingStrategy + * @enum {number} + * @property {number} UNKNOWN=0 UNKNOWN value + * @property {number} TABLET_THROTTLER=1 TABLET_THROTTLER value + */ + querythrottler.ThrottlingStrategy = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNKNOWN"] = 0; + values[valuesById[1] = "TABLET_THROTTLER"] = 1; + return values; + })(); + + querythrottler.Config = (function() { + + /** + * Properties of a Config. + * @memberof querythrottler + * @interface IConfig + * @property {boolean|null} [enabled] Config enabled + * @property {querythrottler.ThrottlingStrategy|null} [strategy] Config strategy + * @property {querythrottler.ITabletStrategyConfig|null} [tablet_strategy_config] Config tablet_strategy_config + * @property {boolean|null} [dry_run] Config dry_run + */ + + /** + * Constructs a new Config. + * @memberof querythrottler + * @classdesc Represents a Config. 
+ * @implements IConfig + * @constructor + * @param {querythrottler.IConfig=} [properties] Properties to set + */ + function Config(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Config enabled. + * @member {boolean} enabled + * @memberof querythrottler.Config + * @instance + */ + Config.prototype.enabled = false; + + /** + * Config strategy. + * @member {querythrottler.ThrottlingStrategy} strategy + * @memberof querythrottler.Config + * @instance + */ + Config.prototype.strategy = 0; + + /** + * Config tablet_strategy_config. + * @member {querythrottler.ITabletStrategyConfig|null|undefined} tablet_strategy_config + * @memberof querythrottler.Config + * @instance + */ + Config.prototype.tablet_strategy_config = null; + + /** + * Config dry_run. + * @member {boolean} dry_run + * @memberof querythrottler.Config + * @instance + */ + Config.prototype.dry_run = false; + + /** + * Creates a new Config instance using the specified properties. + * @function create + * @memberof querythrottler.Config + * @static + * @param {querythrottler.IConfig=} [properties] Properties to set + * @returns {querythrottler.Config} Config instance + */ + Config.create = function create(properties) { + return new Config(properties); + }; + + /** + * Encodes the specified Config message. Does not implicitly {@link querythrottler.Config.verify|verify} messages. 
+ * @function encode + * @memberof querythrottler.Config + * @static + * @param {querythrottler.IConfig} message Config message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Config.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.enabled != null && Object.hasOwnProperty.call(message, "enabled")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.enabled); + if (message.strategy != null && Object.hasOwnProperty.call(message, "strategy")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.strategy); + if (message.tablet_strategy_config != null && Object.hasOwnProperty.call(message, "tablet_strategy_config")) + $root.querythrottler.TabletStrategyConfig.encode(message.tablet_strategy_config, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.dry_run); + return writer; + }; + + /** + * Encodes the specified Config message, length delimited. Does not implicitly {@link querythrottler.Config.verify|verify} messages. + * @function encodeDelimited + * @memberof querythrottler.Config + * @static + * @param {querythrottler.IConfig} message Config message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Config.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Config message from the specified reader or buffer. 
+ * @function decode + * @memberof querythrottler.Config + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {querythrottler.Config} Config + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Config.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.querythrottler.Config(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.enabled = reader.bool(); + break; + } + case 2: { + message.strategy = reader.int32(); + break; + } + case 3: { + message.tablet_strategy_config = $root.querythrottler.TabletStrategyConfig.decode(reader, reader.uint32()); + break; + } + case 4: { + message.dry_run = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Config message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof querythrottler.Config + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {querythrottler.Config} Config + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Config.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Config message. 
+ * @function verify + * @memberof querythrottler.Config + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Config.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.enabled != null && message.hasOwnProperty("enabled")) + if (typeof message.enabled !== "boolean") + return "enabled: boolean expected"; + if (message.strategy != null && message.hasOwnProperty("strategy")) + switch (message.strategy) { + default: + return "strategy: enum value expected"; + case 0: + case 1: + break; + } + if (message.tablet_strategy_config != null && message.hasOwnProperty("tablet_strategy_config")) { + let error = $root.querythrottler.TabletStrategyConfig.verify(message.tablet_strategy_config); + if (error) + return "tablet_strategy_config." + error; + } + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + if (typeof message.dry_run !== "boolean") + return "dry_run: boolean expected"; + return null; + }; + + /** + * Creates a Config message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof querythrottler.Config + * @static + * @param {Object.} object Plain object + * @returns {querythrottler.Config} Config + */ + Config.fromObject = function fromObject(object) { + if (object instanceof $root.querythrottler.Config) + return object; + let message = new $root.querythrottler.Config(); + if (object.enabled != null) + message.enabled = Boolean(object.enabled); + switch (object.strategy) { + default: + if (typeof object.strategy === "number") { + message.strategy = object.strategy; + break; + } + break; + case "UNKNOWN": + case 0: + message.strategy = 0; + break; + case "TABLET_THROTTLER": + case 1: + message.strategy = 1; + break; + } + if (object.tablet_strategy_config != null) { + if (typeof object.tablet_strategy_config !== "object") + throw TypeError(".querythrottler.Config.tablet_strategy_config: object expected"); + message.tablet_strategy_config = $root.querythrottler.TabletStrategyConfig.fromObject(object.tablet_strategy_config); + } + if (object.dry_run != null) + message.dry_run = Boolean(object.dry_run); + return message; + }; + + /** + * Creates a plain object from a Config message. Also converts values to other types if specified. + * @function toObject + * @memberof querythrottler.Config + * @static + * @param {querythrottler.Config} message Config + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Config.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.enabled = false; + object.strategy = options.enums === String ? "UNKNOWN" : 0; + object.tablet_strategy_config = null; + object.dry_run = false; + } + if (message.enabled != null && message.hasOwnProperty("enabled")) + object.enabled = message.enabled; + if (message.strategy != null && message.hasOwnProperty("strategy")) + object.strategy = options.enums === String ? 
$root.querythrottler.ThrottlingStrategy[message.strategy] === undefined ? message.strategy : $root.querythrottler.ThrottlingStrategy[message.strategy] : message.strategy; + if (message.tablet_strategy_config != null && message.hasOwnProperty("tablet_strategy_config")) + object.tablet_strategy_config = $root.querythrottler.TabletStrategyConfig.toObject(message.tablet_strategy_config, options); + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + object.dry_run = message.dry_run; + return object; + }; + + /** + * Converts this Config to JSON. + * @function toJSON + * @memberof querythrottler.Config + * @instance + * @returns {Object.} JSON object + */ + Config.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Config + * @function getTypeUrl + * @memberof querythrottler.Config + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Config.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/querythrottler.Config"; + }; + + return Config; + })(); + + querythrottler.TabletStrategyConfig = (function() { + + /** + * Properties of a TabletStrategyConfig. + * @memberof querythrottler + * @interface ITabletStrategyConfig + * @property {Object.|null} [tablet_rules] TabletStrategyConfig tablet_rules + */ + + /** + * Constructs a new TabletStrategyConfig. + * @memberof querythrottler + * @classdesc Represents a TabletStrategyConfig. 
+ * @implements ITabletStrategyConfig + * @constructor + * @param {querythrottler.ITabletStrategyConfig=} [properties] Properties to set + */ + function TabletStrategyConfig(properties) { + this.tablet_rules = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TabletStrategyConfig tablet_rules. + * @member {Object.} tablet_rules + * @memberof querythrottler.TabletStrategyConfig + * @instance + */ + TabletStrategyConfig.prototype.tablet_rules = $util.emptyObject; + + /** + * Creates a new TabletStrategyConfig instance using the specified properties. + * @function create + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {querythrottler.ITabletStrategyConfig=} [properties] Properties to set + * @returns {querythrottler.TabletStrategyConfig} TabletStrategyConfig instance + */ + TabletStrategyConfig.create = function create(properties) { + return new TabletStrategyConfig(properties); + }; + + /** + * Encodes the specified TabletStrategyConfig message. Does not implicitly {@link querythrottler.TabletStrategyConfig.verify|verify} messages. 
+ * @function encode + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {querythrottler.ITabletStrategyConfig} message TabletStrategyConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TabletStrategyConfig.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_rules != null && Object.hasOwnProperty.call(message, "tablet_rules")) + for (let keys = Object.keys(message.tablet_rules), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.querythrottler.StatementRuleSet.encode(message.tablet_rules[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + return writer; + }; + + /** + * Encodes the specified TabletStrategyConfig message, length delimited. Does not implicitly {@link querythrottler.TabletStrategyConfig.verify|verify} messages. + * @function encodeDelimited + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {querythrottler.ITabletStrategyConfig} message TabletStrategyConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TabletStrategyConfig.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TabletStrategyConfig message from the specified reader or buffer. 
+ * @function decode + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {querythrottler.TabletStrategyConfig} TabletStrategyConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TabletStrategyConfig.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.querythrottler.TabletStrategyConfig(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.tablet_rules === $util.emptyObject) + message.tablet_rules = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.querythrottler.StatementRuleSet.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.tablet_rules[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TabletStrategyConfig message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {querythrottler.TabletStrategyConfig} TabletStrategyConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TabletStrategyConfig.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TabletStrategyConfig message. + * @function verify + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TabletStrategyConfig.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_rules != null && message.hasOwnProperty("tablet_rules")) { + if (!$util.isObject(message.tablet_rules)) + return "tablet_rules: object expected"; + let key = Object.keys(message.tablet_rules); + for (let i = 0; i < key.length; ++i) { + let error = $root.querythrottler.StatementRuleSet.verify(message.tablet_rules[key[i]]); + if (error) + return "tablet_rules." + error; + } + } + return null; + }; + + /** + * Creates a TabletStrategyConfig message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {Object.} object Plain object + * @returns {querythrottler.TabletStrategyConfig} TabletStrategyConfig + */ + TabletStrategyConfig.fromObject = function fromObject(object) { + if (object instanceof $root.querythrottler.TabletStrategyConfig) + return object; + let message = new $root.querythrottler.TabletStrategyConfig(); + if (object.tablet_rules) { + if (typeof object.tablet_rules !== "object") + throw TypeError(".querythrottler.TabletStrategyConfig.tablet_rules: object expected"); + message.tablet_rules = {}; + for (let keys = Object.keys(object.tablet_rules), i = 0; i < keys.length; ++i) { + if (typeof object.tablet_rules[keys[i]] !== "object") + throw TypeError(".querythrottler.TabletStrategyConfig.tablet_rules: object expected"); + message.tablet_rules[keys[i]] = $root.querythrottler.StatementRuleSet.fromObject(object.tablet_rules[keys[i]]); + } + } + return message; + }; + + /** + * Creates a plain object from a TabletStrategyConfig message. Also converts values to other types if specified. + * @function toObject + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {querythrottler.TabletStrategyConfig} message TabletStrategyConfig + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TabletStrategyConfig.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.tablet_rules = {}; + let keys2; + if (message.tablet_rules && (keys2 = Object.keys(message.tablet_rules)).length) { + object.tablet_rules = {}; + for (let j = 0; j < keys2.length; ++j) + object.tablet_rules[keys2[j]] = $root.querythrottler.StatementRuleSet.toObject(message.tablet_rules[keys2[j]], options); + } + return object; + }; + + /** + * Converts this TabletStrategyConfig to JSON. 
+ * @function toJSON + * @memberof querythrottler.TabletStrategyConfig + * @instance + * @returns {Object.} JSON object + */ + TabletStrategyConfig.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TabletStrategyConfig + * @function getTypeUrl + * @memberof querythrottler.TabletStrategyConfig + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TabletStrategyConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/querythrottler.TabletStrategyConfig"; + }; + + return TabletStrategyConfig; + })(); + + querythrottler.StatementRuleSet = (function() { + + /** + * Properties of a StatementRuleSet. + * @memberof querythrottler + * @interface IStatementRuleSet + * @property {Object.|null} [statement_rules] StatementRuleSet statement_rules + */ + + /** + * Constructs a new StatementRuleSet. + * @memberof querythrottler + * @classdesc Represents a StatementRuleSet. + * @implements IStatementRuleSet + * @constructor + * @param {querythrottler.IStatementRuleSet=} [properties] Properties to set + */ + function StatementRuleSet(properties) { + this.statement_rules = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * StatementRuleSet statement_rules. + * @member {Object.} statement_rules + * @memberof querythrottler.StatementRuleSet + * @instance + */ + StatementRuleSet.prototype.statement_rules = $util.emptyObject; + + /** + * Creates a new StatementRuleSet instance using the specified properties. 
+ * @function create + * @memberof querythrottler.StatementRuleSet + * @static + * @param {querythrottler.IStatementRuleSet=} [properties] Properties to set + * @returns {querythrottler.StatementRuleSet} StatementRuleSet instance + */ + StatementRuleSet.create = function create(properties) { + return new StatementRuleSet(properties); + }; + + /** + * Encodes the specified StatementRuleSet message. Does not implicitly {@link querythrottler.StatementRuleSet.verify|verify} messages. + * @function encode + * @memberof querythrottler.StatementRuleSet + * @static + * @param {querythrottler.IStatementRuleSet} message StatementRuleSet message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + StatementRuleSet.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.statement_rules != null && Object.hasOwnProperty.call(message, "statement_rules")) + for (let keys = Object.keys(message.statement_rules), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.querythrottler.MetricRuleSet.encode(message.statement_rules[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + return writer; + }; + + /** + * Encodes the specified StatementRuleSet message, length delimited. Does not implicitly {@link querythrottler.StatementRuleSet.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof querythrottler.StatementRuleSet + * @static + * @param {querythrottler.IStatementRuleSet} message StatementRuleSet message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + StatementRuleSet.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a StatementRuleSet message from the specified reader or buffer. + * @function decode + * @memberof querythrottler.StatementRuleSet + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {querythrottler.StatementRuleSet} StatementRuleSet + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + StatementRuleSet.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.querythrottler.StatementRuleSet(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.statement_rules === $util.emptyObject) + message.statement_rules = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.querythrottler.MetricRuleSet.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.statement_rules[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a StatementRuleSet message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof querythrottler.StatementRuleSet + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {querythrottler.StatementRuleSet} StatementRuleSet + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + StatementRuleSet.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a StatementRuleSet message. + * @function verify + * @memberof querythrottler.StatementRuleSet + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + StatementRuleSet.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.statement_rules != null && message.hasOwnProperty("statement_rules")) { + if (!$util.isObject(message.statement_rules)) + return "statement_rules: object expected"; + let key = Object.keys(message.statement_rules); + for (let i = 0; i < key.length; ++i) { + let error = $root.querythrottler.MetricRuleSet.verify(message.statement_rules[key[i]]); + if (error) + return "statement_rules." + error; + } + } + return null; + }; + + /** + * Creates a StatementRuleSet message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof querythrottler.StatementRuleSet + * @static + * @param {Object.} object Plain object + * @returns {querythrottler.StatementRuleSet} StatementRuleSet + */ + StatementRuleSet.fromObject = function fromObject(object) { + if (object instanceof $root.querythrottler.StatementRuleSet) + return object; + let message = new $root.querythrottler.StatementRuleSet(); + if (object.statement_rules) { + if (typeof object.statement_rules !== "object") + throw TypeError(".querythrottler.StatementRuleSet.statement_rules: object expected"); + message.statement_rules = {}; + for (let keys = Object.keys(object.statement_rules), i = 0; i < keys.length; ++i) { + if (typeof object.statement_rules[keys[i]] !== "object") + throw TypeError(".querythrottler.StatementRuleSet.statement_rules: object expected"); + message.statement_rules[keys[i]] = $root.querythrottler.MetricRuleSet.fromObject(object.statement_rules[keys[i]]); + } + } + return message; + }; + + /** + * Creates a plain object from a StatementRuleSet message. Also converts values to other types if specified. + * @function toObject + * @memberof querythrottler.StatementRuleSet + * @static + * @param {querythrottler.StatementRuleSet} message StatementRuleSet + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + StatementRuleSet.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.statement_rules = {}; + let keys2; + if (message.statement_rules && (keys2 = Object.keys(message.statement_rules)).length) { + object.statement_rules = {}; + for (let j = 0; j < keys2.length; ++j) + object.statement_rules[keys2[j]] = $root.querythrottler.MetricRuleSet.toObject(message.statement_rules[keys2[j]], options); + } + return object; + }; + + /** + * Converts this StatementRuleSet to JSON. 
+ * @function toJSON + * @memberof querythrottler.StatementRuleSet + * @instance + * @returns {Object.} JSON object + */ + StatementRuleSet.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for StatementRuleSet + * @function getTypeUrl + * @memberof querythrottler.StatementRuleSet + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + StatementRuleSet.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/querythrottler.StatementRuleSet"; + }; + + return StatementRuleSet; + })(); + + querythrottler.MetricRuleSet = (function() { + + /** + * Properties of a MetricRuleSet. + * @memberof querythrottler + * @interface IMetricRuleSet + * @property {Object.|null} [metric_rules] MetricRuleSet metric_rules + */ + + /** + * Constructs a new MetricRuleSet. + * @memberof querythrottler + * @classdesc Represents a MetricRuleSet. + * @implements IMetricRuleSet + * @constructor + * @param {querythrottler.IMetricRuleSet=} [properties] Properties to set + */ + function MetricRuleSet(properties) { + this.metric_rules = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * MetricRuleSet metric_rules. + * @member {Object.} metric_rules + * @memberof querythrottler.MetricRuleSet + * @instance + */ + MetricRuleSet.prototype.metric_rules = $util.emptyObject; + + /** + * Creates a new MetricRuleSet instance using the specified properties. 
+ * @function create + * @memberof querythrottler.MetricRuleSet + * @static + * @param {querythrottler.IMetricRuleSet=} [properties] Properties to set + * @returns {querythrottler.MetricRuleSet} MetricRuleSet instance + */ + MetricRuleSet.create = function create(properties) { + return new MetricRuleSet(properties); + }; + + /** + * Encodes the specified MetricRuleSet message. Does not implicitly {@link querythrottler.MetricRuleSet.verify|verify} messages. + * @function encode + * @memberof querythrottler.MetricRuleSet + * @static + * @param {querythrottler.IMetricRuleSet} message MetricRuleSet message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MetricRuleSet.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.metric_rules != null && Object.hasOwnProperty.call(message, "metric_rules")) + for (let keys = Object.keys(message.metric_rules), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.querythrottler.MetricRule.encode(message.metric_rules[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + return writer; + }; + + /** + * Encodes the specified MetricRuleSet message, length delimited. Does not implicitly {@link querythrottler.MetricRuleSet.verify|verify} messages. + * @function encodeDelimited + * @memberof querythrottler.MetricRuleSet + * @static + * @param {querythrottler.IMetricRuleSet} message MetricRuleSet message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MetricRuleSet.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a MetricRuleSet message from the specified reader or buffer. 
+ * @function decode + * @memberof querythrottler.MetricRuleSet + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {querythrottler.MetricRuleSet} MetricRuleSet + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MetricRuleSet.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.querythrottler.MetricRuleSet(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.metric_rules === $util.emptyObject) + message.metric_rules = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.querythrottler.MetricRule.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.metric_rules[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a MetricRuleSet message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof topodata.ExternalClusters + * @memberof querythrottler.MetricRuleSet * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {topodata.ExternalClusters} ExternalClusters + * @returns {querythrottler.MetricRuleSet} MetricRuleSet * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExternalClusters.decodeDelimited = function decodeDelimited(reader) { + MetricRuleSet.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExternalClusters message. + * Verifies a MetricRuleSet message. * @function verify - * @memberof topodata.ExternalClusters + * @memberof querythrottler.MetricRuleSet * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExternalClusters.verify = function verify(message) { + MetricRuleSet.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.vitess_cluster != null && message.hasOwnProperty("vitess_cluster")) { - if (!Array.isArray(message.vitess_cluster)) - return "vitess_cluster: array expected"; - for (let i = 0; i < message.vitess_cluster.length; ++i) { - let error = $root.topodata.ExternalVitessCluster.verify(message.vitess_cluster[i]); + if (message.metric_rules != null && message.hasOwnProperty("metric_rules")) { + if (!$util.isObject(message.metric_rules)) + return "metric_rules: object expected"; + let key = Object.keys(message.metric_rules); + for (let i = 0; i < key.length; ++i) { + let error = $root.querythrottler.MetricRule.verify(message.metric_rules[key[i]]); if (error) - return "vitess_cluster." + error; + return "metric_rules." 
+ error; } } return null; }; /** - * Creates an ExternalClusters message from a plain object. Also converts values to their respective internal types. + * Creates a MetricRuleSet message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof topodata.ExternalClusters + * @memberof querythrottler.MetricRuleSet * @static * @param {Object.} object Plain object - * @returns {topodata.ExternalClusters} ExternalClusters + * @returns {querythrottler.MetricRuleSet} MetricRuleSet */ - ExternalClusters.fromObject = function fromObject(object) { - if (object instanceof $root.topodata.ExternalClusters) + MetricRuleSet.fromObject = function fromObject(object) { + if (object instanceof $root.querythrottler.MetricRuleSet) return object; - let message = new $root.topodata.ExternalClusters(); - if (object.vitess_cluster) { - if (!Array.isArray(object.vitess_cluster)) - throw TypeError(".topodata.ExternalClusters.vitess_cluster: array expected"); - message.vitess_cluster = []; - for (let i = 0; i < object.vitess_cluster.length; ++i) { - if (typeof object.vitess_cluster[i] !== "object") - throw TypeError(".topodata.ExternalClusters.vitess_cluster: object expected"); - message.vitess_cluster[i] = $root.topodata.ExternalVitessCluster.fromObject(object.vitess_cluster[i]); + let message = new $root.querythrottler.MetricRuleSet(); + if (object.metric_rules) { + if (typeof object.metric_rules !== "object") + throw TypeError(".querythrottler.MetricRuleSet.metric_rules: object expected"); + message.metric_rules = {}; + for (let keys = Object.keys(object.metric_rules), i = 0; i < keys.length; ++i) { + if (typeof object.metric_rules[keys[i]] !== "object") + throw TypeError(".querythrottler.MetricRuleSet.metric_rules: object expected"); + message.metric_rules[keys[i]] = $root.querythrottler.MetricRule.fromObject(object.metric_rules[keys[i]]); } } return message; }; /** - * Creates a plain object from an ExternalClusters message. 
Also converts values to other types if specified. + * Creates a plain object from a MetricRuleSet message. Also converts values to other types if specified. * @function toObject - * @memberof topodata.ExternalClusters + * @memberof querythrottler.MetricRuleSet * @static - * @param {topodata.ExternalClusters} message ExternalClusters + * @param {querythrottler.MetricRuleSet} message MetricRuleSet * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExternalClusters.toObject = function toObject(message, options) { + MetricRuleSet.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.vitess_cluster = []; - if (message.vitess_cluster && message.vitess_cluster.length) { - object.vitess_cluster = []; - for (let j = 0; j < message.vitess_cluster.length; ++j) - object.vitess_cluster[j] = $root.topodata.ExternalVitessCluster.toObject(message.vitess_cluster[j], options); + if (options.objects || options.defaults) + object.metric_rules = {}; + let keys2; + if (message.metric_rules && (keys2 = Object.keys(message.metric_rules)).length) { + object.metric_rules = {}; + for (let j = 0; j < keys2.length; ++j) + object.metric_rules[keys2[j]] = $root.querythrottler.MetricRule.toObject(message.metric_rules[keys2[j]], options); } return object; }; /** - * Converts this ExternalClusters to JSON. + * Converts this MetricRuleSet to JSON. 
* @function toJSON - * @memberof topodata.ExternalClusters + * @memberof querythrottler.MetricRuleSet * @instance * @returns {Object.} JSON object */ - ExternalClusters.prototype.toJSON = function toJSON() { + MetricRuleSet.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExternalClusters + * Gets the default type url for MetricRuleSet * @function getTypeUrl - * @memberof topodata.ExternalClusters + * @memberof querythrottler.MetricRuleSet * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExternalClusters.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MetricRuleSet.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/topodata.ExternalClusters"; + return typeUrlPrefix + "/querythrottler.MetricRuleSet"; }; - return ExternalClusters; + return MetricRuleSet; })(); - return topodata; -})(); - -export const vtorcdata = $root.vtorcdata = (() => { - - /** - * Namespace vtorcdata. - * @exports vtorcdata - * @namespace - */ - const vtorcdata = {}; - - vtorcdata.Keyspace = (function() { + querythrottler.MetricRule = (function() { /** - * Properties of a Keyspace. - * @memberof vtorcdata - * @interface IKeyspace - * @property {boolean|null} [disable_emergency_reparent] Keyspace disable_emergency_reparent + * Properties of a MetricRule. + * @memberof querythrottler + * @interface IMetricRule + * @property {Array.|null} [thresholds] MetricRule thresholds */ /** - * Constructs a new Keyspace. - * @memberof vtorcdata - * @classdesc Represents a Keyspace. - * @implements IKeyspace + * Constructs a new MetricRule. + * @memberof querythrottler + * @classdesc Represents a MetricRule. 
+ * @implements IMetricRule * @constructor - * @param {vtorcdata.IKeyspace=} [properties] Properties to set + * @param {querythrottler.IMetricRule=} [properties] Properties to set */ - function Keyspace(properties) { + function MetricRule(properties) { + this.thresholds = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -47783,75 +49305,78 @@ export const vtorcdata = $root.vtorcdata = (() => { } /** - * Keyspace disable_emergency_reparent. - * @member {boolean} disable_emergency_reparent - * @memberof vtorcdata.Keyspace + * MetricRule thresholds. + * @member {Array.} thresholds + * @memberof querythrottler.MetricRule * @instance */ - Keyspace.prototype.disable_emergency_reparent = false; + MetricRule.prototype.thresholds = $util.emptyArray; /** - * Creates a new Keyspace instance using the specified properties. + * Creates a new MetricRule instance using the specified properties. * @function create - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static - * @param {vtorcdata.IKeyspace=} [properties] Properties to set - * @returns {vtorcdata.Keyspace} Keyspace instance + * @param {querythrottler.IMetricRule=} [properties] Properties to set + * @returns {querythrottler.MetricRule} MetricRule instance */ - Keyspace.create = function create(properties) { - return new Keyspace(properties); + MetricRule.create = function create(properties) { + return new MetricRule(properties); }; /** - * Encodes the specified Keyspace message. Does not implicitly {@link vtorcdata.Keyspace.verify|verify} messages. + * Encodes the specified MetricRule message. Does not implicitly {@link querythrottler.MetricRule.verify|verify} messages. 
* @function encode - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static - * @param {vtorcdata.IKeyspace} message Keyspace message or plain object to encode + * @param {querythrottler.IMetricRule} message MetricRule message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Keyspace.encode = function encode(message, writer) { + MetricRule.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.disable_emergency_reparent != null && Object.hasOwnProperty.call(message, "disable_emergency_reparent")) - writer.uint32(/* id 1, wireType 0 =*/8).bool(message.disable_emergency_reparent); + if (message.thresholds != null && message.thresholds.length) + for (let i = 0; i < message.thresholds.length; ++i) + $root.querythrottler.ThrottleThreshold.encode(message.thresholds[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vtorcdata.Keyspace.verify|verify} messages. + * Encodes the specified MetricRule message, length delimited. Does not implicitly {@link querythrottler.MetricRule.verify|verify} messages. * @function encodeDelimited - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static - * @param {vtorcdata.IKeyspace} message Keyspace message or plain object to encode + * @param {querythrottler.IMetricRule} message MetricRule message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Keyspace.encodeDelimited = function encodeDelimited(message, writer) { + MetricRule.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Keyspace message from the specified reader or buffer. + * Decodes a MetricRule message from the specified reader or buffer. 
* @function decode - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtorcdata.Keyspace} Keyspace + * @returns {querythrottler.MetricRule} MetricRule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Keyspace.decode = function decode(reader, length) { + MetricRule.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtorcdata.Keyspace(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.querythrottler.MetricRule(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.disable_emergency_reparent = reader.bool(); + if (!(message.thresholds && message.thresholds.length)) + message.thresholds = []; + message.thresholds.push($root.querythrottler.ThrottleThreshold.decode(reader, reader.uint32())); break; } default: @@ -47863,122 +49388,140 @@ export const vtorcdata = $root.vtorcdata = (() => { }; /** - * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * Decodes a MetricRule message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtorcdata.Keyspace} Keyspace + * @returns {querythrottler.MetricRule} MetricRule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Keyspace.decodeDelimited = function decodeDelimited(reader) { + MetricRule.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Keyspace message. + * Verifies a MetricRule message. * @function verify - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Keyspace.verify = function verify(message) { + MetricRule.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) - if (typeof message.disable_emergency_reparent !== "boolean") - return "disable_emergency_reparent: boolean expected"; + if (message.thresholds != null && message.hasOwnProperty("thresholds")) { + if (!Array.isArray(message.thresholds)) + return "thresholds: array expected"; + for (let i = 0; i < message.thresholds.length; ++i) { + let error = $root.querythrottler.ThrottleThreshold.verify(message.thresholds[i]); + if (error) + return "thresholds." + error; + } + } return null; }; /** - * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * Creates a MetricRule message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static * @param {Object.} object Plain object - * @returns {vtorcdata.Keyspace} Keyspace + * @returns {querythrottler.MetricRule} MetricRule */ - Keyspace.fromObject = function fromObject(object) { - if (object instanceof $root.vtorcdata.Keyspace) + MetricRule.fromObject = function fromObject(object) { + if (object instanceof $root.querythrottler.MetricRule) return object; - let message = new $root.vtorcdata.Keyspace(); - if (object.disable_emergency_reparent != null) - message.disable_emergency_reparent = Boolean(object.disable_emergency_reparent); + let message = new $root.querythrottler.MetricRule(); + if (object.thresholds) { + if (!Array.isArray(object.thresholds)) + throw TypeError(".querythrottler.MetricRule.thresholds: array expected"); + message.thresholds = []; + for (let i = 0; i < object.thresholds.length; ++i) { + if (typeof object.thresholds[i] !== "object") + throw TypeError(".querythrottler.MetricRule.thresholds: object expected"); + message.thresholds[i] = $root.querythrottler.ThrottleThreshold.fromObject(object.thresholds[i]); + } + } return message; }; /** - * Creates a plain object from a Keyspace message. Also converts values to other types if specified. + * Creates a plain object from a MetricRule message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static - * @param {vtorcdata.Keyspace} message Keyspace + * @param {querythrottler.MetricRule} message MetricRule * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Keyspace.toObject = function toObject(message, options) { + MetricRule.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.disable_emergency_reparent = false; - if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) - object.disable_emergency_reparent = message.disable_emergency_reparent; + if (options.arrays || options.defaults) + object.thresholds = []; + if (message.thresholds && message.thresholds.length) { + object.thresholds = []; + for (let j = 0; j < message.thresholds.length; ++j) + object.thresholds[j] = $root.querythrottler.ThrottleThreshold.toObject(message.thresholds[j], options); + } return object; }; /** - * Converts this Keyspace to JSON. + * Converts this MetricRule to JSON. 
* @function toJSON - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @instance * @returns {Object.} JSON object */ - Keyspace.prototype.toJSON = function toJSON() { + MetricRule.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Keyspace + * Gets the default type url for MetricRule * @function getTypeUrl - * @memberof vtorcdata.Keyspace + * @memberof querythrottler.MetricRule * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Keyspace.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MetricRule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtorcdata.Keyspace"; + return typeUrlPrefix + "/querythrottler.MetricRule"; }; - return Keyspace; + return MetricRule; })(); - vtorcdata.Shard = (function() { + querythrottler.ThrottleThreshold = (function() { /** - * Properties of a Shard. - * @memberof vtorcdata - * @interface IShard - * @property {boolean|null} [disable_emergency_reparent] Shard disable_emergency_reparent + * Properties of a ThrottleThreshold. + * @memberof querythrottler + * @interface IThrottleThreshold + * @property {number|null} [above] ThrottleThreshold above + * @property {number|null} [throttle] ThrottleThreshold throttle */ /** - * Constructs a new Shard. - * @memberof vtorcdata - * @classdesc Represents a Shard. - * @implements IShard + * Constructs a new ThrottleThreshold. + * @memberof querythrottler + * @classdesc Represents a ThrottleThreshold. 
+ * @implements IThrottleThreshold * @constructor - * @param {vtorcdata.IShard=} [properties] Properties to set + * @param {querythrottler.IThrottleThreshold=} [properties] Properties to set */ - function Shard(properties) { + function ThrottleThreshold(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -47986,75 +49529,89 @@ export const vtorcdata = $root.vtorcdata = (() => { } /** - * Shard disable_emergency_reparent. - * @member {boolean} disable_emergency_reparent - * @memberof vtorcdata.Shard + * ThrottleThreshold above. + * @member {number} above + * @memberof querythrottler.ThrottleThreshold * @instance */ - Shard.prototype.disable_emergency_reparent = false; + ThrottleThreshold.prototype.above = 0; /** - * Creates a new Shard instance using the specified properties. + * ThrottleThreshold throttle. + * @member {number} throttle + * @memberof querythrottler.ThrottleThreshold + * @instance + */ + ThrottleThreshold.prototype.throttle = 0; + + /** + * Creates a new ThrottleThreshold instance using the specified properties. * @function create - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static - * @param {vtorcdata.IShard=} [properties] Properties to set - * @returns {vtorcdata.Shard} Shard instance + * @param {querythrottler.IThrottleThreshold=} [properties] Properties to set + * @returns {querythrottler.ThrottleThreshold} ThrottleThreshold instance */ - Shard.create = function create(properties) { - return new Shard(properties); + ThrottleThreshold.create = function create(properties) { + return new ThrottleThreshold(properties); }; /** - * Encodes the specified Shard message. Does not implicitly {@link vtorcdata.Shard.verify|verify} messages. + * Encodes the specified ThrottleThreshold message. Does not implicitly {@link querythrottler.ThrottleThreshold.verify|verify} messages. 
* @function encode - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static - * @param {vtorcdata.IShard} message Shard message or plain object to encode + * @param {querythrottler.IThrottleThreshold} message ThrottleThreshold message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Shard.encode = function encode(message, writer) { + ThrottleThreshold.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.disable_emergency_reparent != null && Object.hasOwnProperty.call(message, "disable_emergency_reparent")) - writer.uint32(/* id 1, wireType 0 =*/8).bool(message.disable_emergency_reparent); + if (message.above != null && Object.hasOwnProperty.call(message, "above")) + writer.uint32(/* id 1, wireType 1 =*/9).double(message.above); + if (message.throttle != null && Object.hasOwnProperty.call(message, "throttle")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.throttle); return writer; }; /** - * Encodes the specified Shard message, length delimited. Does not implicitly {@link vtorcdata.Shard.verify|verify} messages. + * Encodes the specified ThrottleThreshold message, length delimited. Does not implicitly {@link querythrottler.ThrottleThreshold.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static - * @param {vtorcdata.IShard} message Shard message or plain object to encode + * @param {querythrottler.IThrottleThreshold} message ThrottleThreshold message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Shard.encodeDelimited = function encodeDelimited(message, writer) { + ThrottleThreshold.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Shard message from the specified reader or buffer. + * Decodes a ThrottleThreshold message from the specified reader or buffer. * @function decode - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtorcdata.Shard} Shard + * @returns {querythrottler.ThrottleThreshold} ThrottleThreshold * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Shard.decode = function decode(reader, length) { + ThrottleThreshold.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtorcdata.Shard(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.querythrottler.ThrottleThreshold(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.disable_emergency_reparent = reader.bool(); + message.above = reader.double(); + break; + } + case 2: { + message.throttle = reader.int32(); break; } default: @@ -48066,105 +49623,114 @@ export const vtorcdata = $root.vtorcdata = (() => { }; /** - * Decodes a Shard message from the specified reader or buffer, length delimited. + * Decodes a ThrottleThreshold message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtorcdata.Shard} Shard + * @returns {querythrottler.ThrottleThreshold} ThrottleThreshold * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Shard.decodeDelimited = function decodeDelimited(reader) { + ThrottleThreshold.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Shard message. + * Verifies a ThrottleThreshold message. 
* @function verify - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Shard.verify = function verify(message) { + ThrottleThreshold.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) - if (typeof message.disable_emergency_reparent !== "boolean") - return "disable_emergency_reparent: boolean expected"; + if (message.above != null && message.hasOwnProperty("above")) + if (typeof message.above !== "number") + return "above: number expected"; + if (message.throttle != null && message.hasOwnProperty("throttle")) + if (!$util.isInteger(message.throttle)) + return "throttle: integer expected"; return null; }; /** - * Creates a Shard message from a plain object. Also converts values to their respective internal types. + * Creates a ThrottleThreshold message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static * @param {Object.} object Plain object - * @returns {vtorcdata.Shard} Shard + * @returns {querythrottler.ThrottleThreshold} ThrottleThreshold */ - Shard.fromObject = function fromObject(object) { - if (object instanceof $root.vtorcdata.Shard) + ThrottleThreshold.fromObject = function fromObject(object) { + if (object instanceof $root.querythrottler.ThrottleThreshold) return object; - let message = new $root.vtorcdata.Shard(); - if (object.disable_emergency_reparent != null) - message.disable_emergency_reparent = Boolean(object.disable_emergency_reparent); + let message = new $root.querythrottler.ThrottleThreshold(); + if (object.above != null) + message.above = Number(object.above); + if (object.throttle != null) + message.throttle = object.throttle | 0; return message; }; /** - * Creates a plain object from a Shard message. Also converts values to other types if specified. + * Creates a plain object from a ThrottleThreshold message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static - * @param {vtorcdata.Shard} message Shard + * @param {querythrottler.ThrottleThreshold} message ThrottleThreshold * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Shard.toObject = function toObject(message, options) { + ThrottleThreshold.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.disable_emergency_reparent = false; - if (message.disable_emergency_reparent != null && message.hasOwnProperty("disable_emergency_reparent")) - object.disable_emergency_reparent = message.disable_emergency_reparent; + if (options.defaults) { + object.above = 0; + object.throttle = 0; + } + if (message.above != null && message.hasOwnProperty("above")) + object.above = options.json && !isFinite(message.above) ? String(message.above) : message.above; + if (message.throttle != null && message.hasOwnProperty("throttle")) + object.throttle = message.throttle; return object; }; /** - * Converts this Shard to JSON. + * Converts this ThrottleThreshold to JSON. 
* @function toJSON - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @instance * @returns {Object.} JSON object */ - Shard.prototype.toJSON = function toJSON() { + ThrottleThreshold.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Shard + * Gets the default type url for ThrottleThreshold * @function getTypeUrl - * @memberof vtorcdata.Shard + * @memberof querythrottler.ThrottleThreshold * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Shard.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ThrottleThreshold.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtorcdata.Shard"; + return typeUrlPrefix + "/querythrottler.ThrottleThreshold"; }; - return Shard; + return ThrottleThreshold; })(); - return vtorcdata; + return querythrottler; })(); export const vtrpc = $root.vtrpc = (() => { @@ -68939,6 +70505,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * Properties of a DemotePrimaryRequest. * @memberof tabletmanagerdata * @interface IDemotePrimaryRequest + * @property {boolean|null} [force] DemotePrimaryRequest force */ /** @@ -68956,6 +70523,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { this[keys[i]] = properties[keys[i]]; } + /** + * DemotePrimaryRequest force. + * @member {boolean} force + * @memberof tabletmanagerdata.DemotePrimaryRequest + * @instance + */ + DemotePrimaryRequest.prototype.force = false; + /** * Creates a new DemotePrimaryRequest instance using the specified properties. 
* @function create @@ -68980,6 +70555,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { DemotePrimaryRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.force); return writer; }; @@ -69014,6 +70591,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.force = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -69049,6 +70630,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { DemotePrimaryRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; return null; }; @@ -69063,7 +70647,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { DemotePrimaryRequest.fromObject = function fromObject(object) { if (object instanceof $root.tabletmanagerdata.DemotePrimaryRequest) return object; - return new $root.tabletmanagerdata.DemotePrimaryRequest(); + let message = new $root.tabletmanagerdata.DemotePrimaryRequest(); + if (object.force != null) + message.force = Boolean(object.force); + return message; }; /** @@ -69075,8 +70662,15 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DemotePrimaryRequest.toObject = function toObject() { - return {}; + DemotePrimaryRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.force = false; + if (message.force != null && message.hasOwnProperty("force")) + 
object.force = message.force; + return object; }; /** diff --git a/web/vtadmin/tests/handlers.ts b/web/vtadmin/tests/handlers.ts index cc3b63c31fc..1b0993eae14 100644 --- a/web/vtadmin/tests/handlers.ts +++ b/web/vtadmin/tests/handlers.ts @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -import { graphql, rest } from 'msw' // Add handlers here as needed for mocking the test server export const handlers = [