+98
.claude/agents/code-reviewer.md
···
1
+
---
2
+
name: code-reviewer-v1
3
+
description: Call this agent to review staged and unstaged code in the repository. It evaluates code quality and security.
4
+
tools: Bash, Glob, Grep, LS, Read, WebFetch, TodoWrite, WebSearch, BashOutput, KillBash, mcp__git-mcp-server__git_add, mcp__git-mcp-server__git_branch, mcp__git-mcp-server__git_checkout, mcp__git-mcp-server__git_cherry_pick, mcp__git-mcp-server__git_clean, mcp__git-mcp-server__git_clear_working_dir, mcp__git-mcp-server__git_clone, mcp__git-mcp-server__git_commit, mcp__git-mcp-server__git_diff, mcp__git-mcp-server__git_fetch, mcp__git-mcp-server__git_init, mcp__git-mcp-server__git_log, mcp__git-mcp-server__git_merge, mcp__git-mcp-server__git_pull, mcp__git-mcp-server__git_push, mcp__git-mcp-server__git_rebase, mcp__git-mcp-server__git_remote, mcp__git-mcp-server__git_reset, mcp__git-mcp-server__git_set_working_dir, mcp__git-mcp-server__git_show, mcp__git-mcp-server__git_stash, mcp__git-mcp-server__git_status, mcp__git-mcp-server__git_tag, mcp__git-mcp-server__git_worktree, mcp__git-mcp-server__git_wrapup_instructions
5
+
color: green
6
+
---
7
+
**All imports in this document should be treated as if they were in the main prompt file.**
8
+
9
+
You are a comprehensive code review agent examining a piece of code created by the main agent that calls you. Your role is to provide thorough, constructive feedback that ensures code quality, maintainability, and alignment with established patterns and decisions, while also suggesting ways to improve both the code in question and our stored memory bank for future iterations.
10
+
11
+
The agent that calls you may also provide a Task Master task definition. Your evaluation of the output should take this task definition into account and ensure that the provided solution meets our goals.
12
+
13
+
## Review Methodology
14
+
15
+
### Phase 1: Context Gathering
16
+
1. Check the repository's Git status, both staged and unstaged
17
+
2. Examine the full diff to understand what's changing
18
+
3. Search the codebase for similar patterns or implementations that might be reusable
19
+
20
+
### Phase 2: Comprehensive Review
21
+
#### Code Quality & Patterns
22
+
- **Compilation**: For all touched packages and apps, make sure the code compiles and all tests pass
23
+
- **DRY Violations**: Search for similar code patterns elsewhere in the codebase
24
+
- **Consistency**: Does this follow established patterns in the project?
25
+
- **Abstraction Level**: Is this the right level of generalization?
26
+
- **Naming**: Are names clear, consistent, and follow project conventions?
27
+
28
+
#### Engineering Excellence
29
+
- **Error Handling**: How are errors caught, logged, and recovered from?
30
+
- **Edge Cases**: What happens with null/undefined/empty/malformed inputs?
31
+
- **Performance**: Will this scale with realistic data volumes?
32
+
- Consider cases where work is done sequentially when a parallel approach would be better
33
+
- Example: the original implementation of Fastify health checks had try-catch blocks all in a row; a good suggestion would be to make these into functions called with `Promise.allSettled` (see the sketch after this list)
34
+
- **Security**: Are there injection risks, exposed secrets, or auth bypasses?
35
+
- **Testing**: Are critical paths tested? Are tests meaningful?
36
+
- Our system is entirely built around a dependency injector; we can create (and make DRY and reusable) stub implementations of our services in order to allow for more integrated tests. Recommend this proactively.
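For instance, a minimal sketch of the `Promise.allSettled` refactor described above (the health-check names here are hypothetical, not taken from any particular service):

```typescript
// Hypothetical health-check helpers; real services will define their own.
const checkDatabase = async (): Promise<void> => { /* ping the database */ };
const checkQueue = async (): Promise<void> => { /* ping the message queue */ };

// Run the checks in parallel and report every failure, rather than using
// sequential try/catch blocks that stop at the first error.
export async function runHealthChecks() {
  const checks = [
    { name: 'database', run: checkDatabase },
    { name: 'queue', run: checkQueue },
  ];

  const results = await Promise.allSettled(checks.map((c) => c.run()));

  return results.map((result, i) => ({
    name: checks[i].name,
    ok: result.status === 'fulfilled',
    error: result.status === 'rejected' ? String(result.reason) : undefined,
  }));
}
```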
37
+
38
+
#### Integration & Dependencies
39
+
- **Codebase Fit**: Does this integrate well with existing modules?
40
+
- **Dependencies**: Are we adding unnecessary dependencies when existing utilities could work?
41
+
- **Side Effects**: What other parts of the system might this affect?
42
+
43
+
### Phase 3: Knowledge Management Assessment
44
+
45
+
Identify knowledge gaps and opportunities:
46
+
47
+
#### Flag for Documentation
48
+
- **New Techniques**: "This retry mechanism is well-implemented and reusable"
49
+
- **Missing Decisions**: "Choosing WebSockets over SSE here seems like an architectural decision that should be recorded"
50
+
- **Complex Logic**: "This order processing logic should be captured as a detail entry"
51
+
- **Implementation doesn't match product concepts**: "The behavior here diverges from the documented product concept; flag it so either the code or the concept entry gets updated"
52
+
53
+
## Review Output Format
54
+
55
+
Structure your review as:
56
+
57
+
### Summary
58
+
Brief overview of the changes and overall assessment
59
+
60
+
### Critical Issues 🔴
61
+
Must-fix problems (security, bugs, broken functionality)
62
+
63
+
### Important Suggestions 🟡
64
+
Should-fix issues (performance, maintainability, patterns)
65
+
66
+
### Minor Improvements 🟢
67
+
Nice-to-have enhancements (style, optimization, clarity)
68
+
69
+
### Knowledge Management
70
+
- **Alignment Check**: How this aligns with existing knowledge
71
+
- **Documentation Opportunities**: What should be added to Basic Memory
72
+
- **Updates Needed**: What existing entries need updating
73
+
74
+
### Code Reuse Opportunities
75
+
Specific suggestions for using existing code instead of reimplementing
76
+
77
+
## Review Tone
78
+
79
+
Be constructive and specific:
80
+
- ✅ "Consider using the cursor pagination technique from `src/api/utils.ts:142` instead"
81
+
- ❌ "This pagination is wrong"
82
+
83
+
- ✅ "This deviates from our decision to use Zod for validation. If intentional, please update the decision entry"
84
+
- ❌ "You should use Zod"
85
+
86
+
- ✅ "Great implementation of circuit breaker! This is reusable - worth documenting"
87
+
- ❌ "Good code"
88
+
89
+
## Special Instructions
90
+
91
+
1. **Search Extensively**: Use Grep and Glob liberally to find similar code patterns
92
+
2. **Reference Specifically**: Include file paths and line numbers in feedback
93
+
3. **Suggest Alternatives**: Don't just identify problems - propose solutions
94
+
4. **Prioritize Feedback**: Focus on what matters most for safety and maintainability
95
+
5. **Learn from History**: Check Basic Memory for past decisions and patterns
96
+
6. **Think Long-term**: Consider how this code will age and be maintained
97
+
98
+
Remember: Your goal is not just to find problems, but to help maintain a coherent, well-documented, and maintainable codebase that builds on established knowledge and patterns.
+174
.claude/mcp-descriptions/git-mcp.mdc
···
1
+
---
2
+
description:
3
+
globs:
4
+
alwaysApply: true
5
+
---
6
+
# LLM Agent Guidelines for `@cyanheads/git-mcp-server`
7
+
8
+
This document provides a concise overview of the available Git tools, designed to be used as a quick-reference guide for an LLM coding assistant.
9
+
10
+
### Guiding Principles for the LLM Agent
11
+
12
+
* **Human-in-the-Loop**: Do not commit any changes without explicit permission from a human operator.
13
+
* **Safety First**: Never use potentially destructive commands like `git_reset`, `git_clean`, or `git_push` with the `force` option enabled. These operations can lead to permanent data loss.
14
+
* **Session Context is Key**: Always start your workflow by setting a working directory with `git_set_working_dir`. Subsequent commands can then use `.` as the path, which is more efficient. Use `git_clear_working_dir` when a session is complete.
15
+
* **Conventional Commits**: When using `git_commit`, write clear, concise messages following the Conventional Commits format: `type(scope): subject`. The tool's description provides detailed guidance; an example follows this list.
16
+
* **Review Before Committing**: Before committing, always use `git_status` and `git_diff` to review the changes. This ensures you create logical, atomic commits.
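For example, a Conventional Commits message in that `type(scope): subject` shape might look like the following (the scope and subject are purely illustrative):

```
feat(checks): add whitelist support for starter pack descriptions
```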
17
+
18
+
---
19
+
20
+
## Commonly Used Tools
21
+
22
+
These are the essential tools for day-to-day version control tasks.
23
+
24
+
### `git_set_working_dir`
25
+
26
+
* **Description**: Sets the default working directory for the current session. Subsequent Git tool calls can use `.` for the `path`, which will resolve to this directory. **This should be the first tool you use in any workflow.**
27
+
* **When to Use**: At the beginning of any task that involves a Git repository to establish context for all subsequent commands.
28
+
* **Input Parameters**:
29
+
30
+
| Parameter | Type | Description |
31
+
| :----------------------- | :------ | :--------------------------------------------------------------------------- |
32
+
| `path` | string | The **absolute path** to set as the default working directory. |
33
+
| `validateGitRepo` | boolean | Validate that the path is a Git repository. Defaults to `true`. |
34
+
| `initializeIfNotPresent` | boolean | If not a Git repository, initialize it with 'git init'. Defaults to `false`. |
35
+
36
+
### `git_status`
37
+
38
+
* **Description**: Retrieves the status of the repository, showing staged, unstaged, and untracked files.
39
+
* **When to Use**: Use this frequently to check the state of the repository before staging changes, after pulling from a remote, or before committing.
40
+
* **Input Parameters**:
41
+
42
+
| Parameter | Type | Description |
43
+
| :-------- | :----- | :------------------------------------------------------------------------------------------------------------------------------------ |
44
+
| `path` | string | Path to the Git repository. Defaults to `.` (the session's working directory). |
45
+
46
+
### `git_add`
47
+
48
+
* **Description**: Stages changes, adding them to the index before committing.
49
+
* **When to Use**: After making changes to files and before you are ready to commit them.
50
+
* **Input Parameters**:
51
+
52
+
| Parameter | Type | Description |
53
+
| :-------- | :------------------- | :------------------------------------------------------------------------------------------------------------------------------------ |
54
+
| `path` | string | Path to the Git repository. Defaults to the directory set via `git_set_working_dir`. |
55
+
| `files` | string \| string\[] | Files or patterns to stage. Defaults to all changes (`.`). |
56
+
57
+
### `git_commit`
58
+
59
+
* **Description**: Commits staged changes to the repository with a descriptive message.
60
+
* **When to Use**: After staging a logical group of changes with `git_add` and receiving approval from the operator to commit.
61
+
* **Input Parameters**:
62
+
63
+
| Parameter | Type | Description |
64
+
| :----------- | :------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- |
65
+
| `path` | string | Path to the Git repository. |
66
+
| `message` | string | The commit message. |
67
+
| `author` | object | Override the commit author (`{ name: string, email: string }`). |
68
+
| `filesToStage`| string\[] | An array of file paths to stage before committing. |
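* **Example**: An illustrative `git_commit` argument payload using only the parameters listed above (the values are hypothetical, and the exact call wrapper depends on your MCP client):

```json
{
  "path": ".",
  "message": "fix(checks): handle empty profile descriptions",
  "filesToStage": ["src/checkProfiles.ts"]
}
```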
69
+
70
+
### `git_log`
71
+
72
+
* **Description**: Shows the commit history. Can be filtered by author, date, or branch.
73
+
* **When to Use**: To review recent changes, find a specific commit, or understand the history of a file or branch.
74
+
* **Input Parameters**:
75
+
76
+
| Parameter | Type | Description |
77
+
| :------------- | :------ | :------------------------------------------------------------------- |
78
+
| `path` | string | Path to the Git repository. |
79
+
| `maxCount` | number | Limit the number of commits to output. |
80
+
| `author` | string | Filter commits by a specific author. |
81
+
| `since` | string | Show commits more recent than a specific date. |
82
+
| `until` | string | Show commits older than a specific date. |
83
+
| `branchOrFile` | string | Show logs for a specific branch, tag, or file path. |
84
+
| `showSignature`| boolean | Show signature verification status for commits. |
85
+
86
+
### `git_diff`
87
+
88
+
* **Description**: Shows changes between commits, the working tree, etc.
89
+
* **When to Use**: To review unstaged changes before adding, or to see the difference between two branches or commits.
90
+
* **Input Parameters**:
91
+
92
+
| Parameter | Type | Description |
93
+
| :--------------- | :------ | :------------------------------------------------------------------------------- |
94
+
| `path` | string | Path to the Git repository. |
95
+
| `commit1` | string | First commit, branch, or ref for comparison. |
96
+
| `commit2` | string | Second commit, branch, or ref for comparison. |
97
+
| `staged` | boolean | Show diff of staged changes. |
98
+
| `file` | string | Limit the diff to a specific file. |
99
+
| `includeUntracked`| boolean | Include untracked files in the diff output. |
100
+
101
+
### `git_branch`
102
+
103
+
* **Description**: Manages branches: list, create, delete, and rename.
104
+
* **When to Use**: To see what branches are available, create a new branch for a feature or bugfix, or clean up old branches. DO NOT do this without human operator confirmation.
105
+
* **Input Parameters**:
106
+
107
+
| Parameter | Type | Description |
108
+
| :------------ | :------ | :------------------------------------------------------------------- |
109
+
| `path` | string | Path to the Git repository. |
110
+
| `mode` | enum | The operation: `list`, `create`, `delete`, `rename`, `show-current`. |
111
+
| `branchName` | string | Name of the branch for create/delete/rename operations. |
112
+
| `newBranchName`| string | The new name for the branch when renaming. |
113
+
| `startPoint` | string | The starting point for a new branch. |
114
+
| `force` | boolean | Force the operation (e.g., deleting an unmerged branch). |
115
+
| `all` | boolean | List all branches (local and remote). |
116
+
| `remote` | boolean | Act on remote-tracking branches. |
117
+
118
+
### `git_checkout`
119
+
120
+
* **Description**: Switches branches or restores working tree files.
121
+
* **When to Use**: To start working on a different branch or to discard changes in a specific file. DO NOT do this without human operator confirmation.
122
+
* **Input Parameters**:
123
+
124
+
| Parameter | Type | Description |
125
+
| :----------- | :------ | :---------------------------------------------------------------- |
126
+
| `path` | string | Path to the Git repository. |
127
+
| `branchOrPath`| string | The branch, commit, tag, or file path to checkout. |
128
+
| `newBranch` | string | Create a new branch before checking out. |
129
+
| `force` | boolean | Force checkout, discarding local changes. |
130
+
131
+
### `git_pull`
132
+
133
+
* **Description**: Fetches from and integrates with a remote repository or a local branch.
134
+
* **When to Use**: To update your current local branch with changes from its remote counterpart. DO NOT do this without human operator confirmation.
135
+
* **Input Parameters**:
136
+
137
+
| Parameter | Type | Description |
138
+
| :-------- | :------ | :----------------------------------------------------------------- |
139
+
| `path` | string | Path to the Git repository. |
140
+
| `remote` | string | The remote repository to pull from (e.g., 'origin'). |
141
+
| `branch` | string | The remote branch to pull. |
142
+
| `rebase` | boolean | Use 'git pull --rebase' instead of merge. |
143
+
| `ffOnly` | boolean | Only allow fast-forward merges. |
144
+
145
+
### `git_push`
146
+
147
+
* **Description**: Updates remote refs with local changes.
148
+
* **When to Use**: After committing your changes locally, use this to share them on the remote repository. DO NOT do this without human operator confirmation.
149
+
* **Input Parameters**:
150
+
151
+
| Parameter | Type | Description |
152
+
| :----------- | :------ | :----------------------------------------------------------- |
153
+
| `path` | string | Path to the Git repository. |
154
+
| `remote` | string | The remote repository to push to. |
155
+
| `branch` | string | The local branch to push. |
156
+
| `remoteBranch`| string | The remote branch to push to. |
157
+
| `force` | boolean | Force the push (use with caution). |
158
+
| `forceWithLease`| boolean | Force push only if remote ref is as expected. |
159
+
| `setUpstream`| boolean | Set the upstream tracking configuration. |
160
+
| `tags` | boolean | Push all tags. |
161
+
| `delete` | boolean | Delete the remote branch. |
162
+
163
+
---
164
+
165
+
## Complex Situations
166
+
167
+
If you encounter a situation where you believe a more advanced or potentially destructive tool is needed (such as `git rebase`, `git reset`, `git cherry-pick`, or `git clean`), **do not proceed automatically**.
168
+
169
+
Instead, you should:
170
+
171
+
1. **Pause execution.**
172
+
2. **Explain the situation** to the human operator.
173
+
3. **State which advanced Git operation you think is necessary and why.**
174
+
4. **Await explicit instruction** from the operator before taking any further action.
+16
.claude/settings.local.json
···
1
+
{
2
+
"permissions": {
3
+
"allow": [
4
+
"mcp__git-mcp-server__git_status",
5
+
"mcp__git-mcp-server__git_diff",
6
+
"mcp__git-mcp-server__git_set_working_dir"
7
+
],
8
+
"ask": [
9
+
"curl"
10
+
]
11
+
},
12
+
"enableAllProjectMcpServers": true,
13
+
"enabledMcpjsonServers": [
14
+
"git-mcp-server"
15
+
]
16
+
}
+13
.mcp.json
+105
CLAUDE.md
···
1
+
# Claude Code Instructions
2
+
3
+
**All imports in this document should be treated as if they were in the main prompt file.**
4
+
5
+
## MCP Orientation Instructions
6
+
7
+
@.claude/mcp-descriptions/github-mcp.mdc
8
+
9
+
NEVER USE A COMMAND-LINE TOOL WHEN AN MCP TOOL IS AVAILABLE. IF YOU THINK AN MCP TOOL IS MALFUNCTIONING AND CANNOT OTHERWISE CONTINUE, STOP AND ASK THE HUMAN OPERATOR FOR ASSISTANCE.
10
+
11
+
## Development Commands
12
+
13
+
### Running the Application
14
+
15
+
- `bun run start` - Run the main application (production mode)
16
+
- `bun run dev` - Run in development mode with file watching
17
+
- `bun i` - Install dependencies
18
+
19
+
### Code Quality
20
+
21
+
- `bun run format` - Format code using Prettier
22
+
- `bun run lint` - Run ESLint to check for issues
23
+
- `bun run lint:fix` - Automatically fix ESLint issues where possible
24
+
25
+
### Docker Deployment
26
+
27
+
- `docker build --pull -t skywatch-tools .` - Build Docker image
28
+
- `docker run -d -p 4101:4101 skywatch-tools` - Run container
29
+
30
+
## Architecture Overview
31
+
32
+
This is a TypeScript rewrite of a Bash-based Bluesky content moderation system for the skywatch.blue independent labeler. The application monitors the Bluesky firehose in real-time and automatically applies labels to content that meets specific moderation criteria.
33
+
34
+
### Core Components
35
+
36
+
- **`main.ts`** - Entry point that sets up Jetstream WebSocket connection to monitor Bluesky firehose events (posts, profiles, handles, starter packs)
37
+
- **`agent.ts`** - Configures the AtpAgent for interacting with Ozone PDS for labeling operations
38
+
- **`constants.ts`** - Contains all moderation check definitions (PROFILE_CHECKS, POST_CHECKS, HANDLE_CHECKS)
39
+
- **`config.ts`** - Environment variable configuration and application settings
40
+
- **Check modules** - Individual modules for different content types:
41
+
- `checkPosts.ts` - Analyzes post content and URLs
42
+
- `checkHandles.ts` - Validates user handles
43
+
- `checkProfiles.ts` - Examines profile descriptions and display names
44
+
- `checkStarterPack.ts` - Reviews starter pack content
45
+
46
+
### Moderation Check System
47
+
48
+
The system uses a `Checks` interface to define moderation rules with the following properties (a rough sketch of the shape follows the list):
49
+
50
+
- `label` - The label to apply when content matches
51
+
- `check` - RegExp pattern to match against content
52
+
- `whitelist` - Optional RegExp to exempt certain content
53
+
- `ignoredDIDs` - Array of DIDs to skip for this check
54
+
- `reportAcct/commentAcct/toLabel` - Actions to take when content matches
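A rough TypeScript sketch of that shape, inferred from the list above and the example in `src/developing_checks.md` (the authoritative definition lives in `src/types.ts` and may differ in detail):

```typescript
// Approximate shape of a moderation check; see src/types.ts for the real definition.
interface Checks {
  label: string; // Label to apply when content matches
  check: RegExp; // Pattern matched against the content
  whitelist?: RegExp; // Optional pattern that exempts matching content
  ignoredDIDs?: string[]; // Optional DIDs to skip for this check
  reportAcct?: boolean; // Report the account when the check matches
  commentAcct?: boolean; // Leave a moderation comment when the check matches
  toLabel?: boolean; // Apply the label when the check matches
}
```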
55
+
56
+
### Environment Configuration
57
+
58
+
The application requires several environment variables:
59
+
60
+
- Bluesky credentials (`BSKY_HANDLE`, `BSKY_PASSWORD`)
61
+
- Ozone server configuration (`OZONE_URL`, `OZONE_PDS`)
62
+
- Optional: firehose URL, ports, rate limiting settings
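A minimal `.env` sketch covering those values (all hostnames and credentials below are placeholders):

```
BSKY_HANDLE=moderation-bot.example.com
BSKY_PASSWORD=example-app-password
OZONE_URL=https://ozone.example.com
OZONE_PDS=pds.example.com
```

See the table in `README.md` for the full list of variables, including the optional ones.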
63
+
64
+
### Data Flow
65
+
66
+
1. Jetstream receives events from Bluesky firehose
67
+
2. Events are categorized by type (post, profile, handle, starter pack)
68
+
3. Appropriate check functions validate content against defined patterns
69
+
4. Matching content triggers labeling actions via Ozone PDS
70
+
5. Cursor position is periodically saved for resumption after restart
71
+
72
+
### Development Notes
73
+
74
+
- Uses Bun as the runtime and package manager
75
+
- Built with modern TypeScript and ESNext modules
76
+
- Implements rate limiting and error handling for API calls
77
+
- Supports both labeling and reporting workflows
78
+
- Includes metrics server on port 4101 for monitoring
79
+
80
+
See `src/developing_checks.md` for detailed instructions on creating new moderation checks.
81
+
82
+
## TODO
83
+
84
+
The code-reviewer has completed a comprehensive review of the codebase and identified several critical issues that need immediate attention:
85
+
86
+
### Immediate Blocking Issues
87
+
88
+
- Missing constants.ts file (only example exists)
89
+
- Unsafe type assertions in main.ts:152,158
90
+
- Inadequate error handling for async operations
91
+
92
+
### High Priority Security & Reliability Concerns
93
+
94
+
- Hardcoded DIDs should be moved to environment variables
95
+
- Missing structured error handling and logging
96
+
- No environment variable validation at startup
97
+
98
+
### Medium Priority Code Quality Issues
99
+
100
+
- Duplicate profile checking logic needs refactoring
101
+
- ESLint configuration needs TypeScript updates
102
+
- Missing comprehensive test suite
103
+
104
+
The reviewer noted that while the modular architecture is well-designed, there are critical execution flaws that must be addressed before this
105
+
can be safely deployed to production.
+14
-14
README.md
···
87
87
88
88
The following environment variables are used for configuration:
89
89
90
-
| Variable | Description | Default |
91
-
| ------------------------ | ---------------------------------------------------------------- | ----------------------------------------- |
92
-
| `DID` | The DID of your moderation service for atproto-proxy headers. | `""` |
93
-
| `OZONE_URL` | The URL of the Ozone service. | `""` |
94
-
| `OZONE_PDS` | The Public Downstream Service for Ozone. | `""` |
95
-
| `BSKY_HANDLE` | The handle (username) of the bot's Bluesky account. | `""` |
96
-
| `BSKY_PASSWORD` | The app password for the bot's Bluesky account. | `""` |
97
-
| `HOST` | The host on which the server runs. | `127.0.0.1` |
98
-
| `PORT` | The port for the main application (currently unused). | `4100` |
99
-
| `METRICS_PORT` | The port for the Prometheus metrics server. | `4101` |
90
+
| Variable | Description | Default |
91
+
| ------------------------ | ---------------------------------------------------------------- | -------------------------------------------------------------- |
92
+
| `DID` | The DID of your moderation service for atproto-proxy headers. | `""` |
93
+
| `OZONE_URL` | The URL of the Ozone service. | `""` |
94
+
| `OZONE_PDS`              | The Personal Data Server (PDS) used by the Ozone service.         | `""`                                                            |
95
+
| `BSKY_HANDLE` | The handle (username) of the bot's Bluesky account. | `""` |
96
+
| `BSKY_PASSWORD` | The app password for the bot's Bluesky account. | `""` |
97
+
| `HOST` | The host on which the server runs. | `127.0.0.1` |
98
+
| `PORT` | The port for the main application (currently unused). | `4100` |
99
+
| `METRICS_PORT` | The port for the Prometheus metrics server. | `4101` |
100
100
| `FIREHOSE_URL` | The WebSocket URL for the Bluesky firehose. | `FIREHOSE_URL=wss://jetstream1.us-east.bsky.network/subscribe` |
101
-
| `CURSOR_UPDATE_INTERVAL` | How often to save the firehose cursor to disk (in milliseconds). | `60000` |
102
-
| `LABEL_LIMIT` | (Optional) API call limit for labeling. | `undefined` |
103
-
| `LABEL_LIMIT_WAIT` | (Optional) Wait time when label limit is hit. | `undefined` |
104
-
| `LOG_LEVEL` | The logging level. | `info` |
101
+
| `CURSOR_UPDATE_INTERVAL` | How often to save the firehose cursor to disk (in milliseconds). | `60000` |
102
+
| `LABEL_LIMIT` | (Optional) API call limit for labeling. | `undefined` |
103
+
| `LABEL_LIMIT_WAIT` | (Optional) Wait time when label limit is hit. | `undefined` |
104
+
| `LOG_LEVEL` | The logging level. | `info` |
bun.lockb
This is a binary file and will not be displayed.
+115
eslint.config.mjs
···
1
+
import eslint from "@eslint/js";
2
+
import tseslint from "typescript-eslint";
3
+
import stylistic from "@stylistic/eslint-plugin";
4
+
import prettier from "eslint-config-prettier";
5
+
import importPlugin from "eslint-plugin-import";
6
+
7
+
export default tseslint.config(
8
+
eslint.configs.recommended,
9
+
...tseslint.configs.strictTypeChecked,
10
+
...tseslint.configs.stylisticTypeChecked,
11
+
prettier,
12
+
{
13
+
languageOptions: {
14
+
parserOptions: {
15
+
project: "./tsconfig.json",
16
+
tsconfigRootDir: import.meta.dirname,
17
+
},
18
+
},
19
+
},
20
+
{
21
+
plugins: {
22
+
"@stylistic": stylistic,
23
+
import: importPlugin,
24
+
},
25
+
rules: {
26
+
// TypeScript specific rules
27
+
"@typescript-eslint/no-unused-vars": [
28
+
"error",
29
+
{ argsIgnorePattern: "^_" },
30
+
],
31
+
"@typescript-eslint/no-explicit-any": "error",
32
+
"@typescript-eslint/no-unsafe-assignment": "error",
33
+
"@typescript-eslint/no-unsafe-member-access": "error",
34
+
"@typescript-eslint/no-unsafe-call": "error",
35
+
"@typescript-eslint/no-unsafe-return": "error",
36
+
"@typescript-eslint/no-unsafe-argument": "error",
37
+
"@typescript-eslint/prefer-nullish-coalescing": "error",
38
+
"@typescript-eslint/prefer-optional-chain": "error",
39
+
"@typescript-eslint/no-non-null-assertion": "error",
40
+
"@typescript-eslint/consistent-type-imports": "error",
41
+
"@typescript-eslint/consistent-type-exports": "error",
42
+
"@typescript-eslint/no-import-type-side-effects": "error",
43
+
44
+
// General code quality
45
+
"no-console": "warn",
46
+
"no-debugger": "error",
47
+
"no-var": "error",
48
+
"prefer-const": "error",
49
+
"prefer-template": "error",
50
+
"object-shorthand": "error",
51
+
"prefer-destructuring": ["error", { object: true, array: false }],
52
+
53
+
// Import rules
54
+
"import/order": [
55
+
"error",
56
+
{
57
+
groups: [
58
+
"builtin",
59
+
"external",
60
+
"internal",
61
+
"parent",
62
+
"sibling",
63
+
"index",
64
+
],
65
+
"newlines-between": "always",
66
+
alphabetize: { order: "asc", caseInsensitive: true },
67
+
},
68
+
],
69
+
"import/no-duplicates": "error",
70
+
"import/no-unresolved": "off", // TypeScript handles this
71
+
72
+
// Security-focused rules
73
+
"no-eval": "error",
74
+
"no-implied-eval": "error",
75
+
"no-new-func": "error",
76
+
"no-script-url": "error",
77
+
78
+
// Error handling
79
+
"no-empty-pattern": "error",
80
+
"no-fallthrough": "error",
81
+
"no-unreachable": "error",
82
+
"no-unreachable-loop": "error",
83
+
84
+
// Style preferences
85
+
"@stylistic/indent": ["error", 2],
86
+
"@stylistic/quotes": ["error", "single"],
87
+
"@stylistic/semi": ["error", "always"],
88
+
//"@stylistic/comma-dangle": ["error", "es5"],
89
+
"@stylistic/object-curly-spacing": ["error", "always"],
90
+
"@stylistic/array-bracket-spacing": ["error", "never"],
91
+
"@stylistic/space-before-function-paren": [
92
+
"error",
93
+
{
94
+
anonymous: "always",
95
+
named: "never",
96
+
asyncArrow: "always",
97
+
},
98
+
],
99
+
},
100
+
},
101
+
{
102
+
files: ["**/*.js", "**/*.mjs"],
103
+
...tseslint.configs.disableTypeChecked,
104
+
},
105
+
{
106
+
ignores: [
107
+
"node_modules/",
108
+
"dist/",
109
+
"build/",
110
+
"*.config.js",
111
+
"*.config.mjs",
112
+
"coverage/",
113
+
],
114
+
},
115
+
);
+9
package.json
···
15
15
},
16
16
"devDependencies": {
17
17
"@eslint/js": "^9.29.0",
18
+
"@stylistic/eslint-plugin": "^5.2.3",
19
+
"@typescript-eslint/eslint-plugin": "^6.10.0",
20
+
"@typescript-eslint/parser": "^6.10.0",
21
+
"@eslint/compat": "^1.3.2",
22
+
"@eslint/eslintrc": "^3.3.1",
23
+
"eslint-config-prettier": "^10.1.8",
24
+
"eslint-plugin-import": "^2.32.0",
25
+
"eslint-plugin-prettier": "^5.5.4",
18
26
"@trivago/prettier-plugin-sort-imports": "^4.3.0",
19
27
"@types/better-sqlite3": "^7.6.13",
20
28
"@types/eslint__js": "^8.42.3",
···
46
54
"pino": "^9.6.0",
47
55
"pino-pretty": "^13.0.0",
48
56
"prom-client": "^15.1.3",
57
+
"stylistic/comma-trailing": "stylistic/comma-trailing",
49
58
"undici": "^7.8.0"
50
59
}
51
60
}
+3
-1
src/agent.ts
···
1
+
import { AtpAgent } from "@atproto/api";
2
+
1
3
import { setGlobalDispatcher, Agent as Agent } from "undici";
4
+
2
5
setGlobalDispatcher(new Agent({ connect: { timeout: 20_000 } }));
3
6
import { BSKY_HANDLE, BSKY_PASSWORD, OZONE_PDS } from "./config.js";
4
-
import { AtpAgent } from "@atproto/api";
5
7
6
8
export const agent = new AtpAgent({
7
9
service: `https://${OZONE_PDS}`,
+3
-3
src/checkHandles.ts
···
1
-
import { HANDLE_CHECKS } from "./constants.js";
2
-
import logger from "./logger.js";
1
+
import { HANDLE_CHECKS } from './constants.js';
2
+
import logger from './logger.js';
3
3
import {
4
4
createAccountReport,
5
5
createAccountComment,
6
6
createAccountLabel,
7
-
} from "./moderation.js";
7
+
} from './moderation.js';
8
8
9
9
export const checkHandle = async (
10
10
did: string,
+6
-6
src/checkPosts.ts
+6
-6
src/checkPosts.ts
···
1
-
import { LINK_SHORTENER, POST_CHECKS, langs } from "./constants.js";
2
-
import { Post } from "./types.js";
3
-
import logger from "./logger.js";
1
+
import { LINK_SHORTENER, POST_CHECKS, langs } from './constants.js';
2
+
import logger from './logger.js';
4
3
import {
5
4
createPostLabel,
6
5
createAccountReport,
7
6
createAccountComment,
8
7
createPostReport,
9
-
} from "./moderation.js";
10
-
import { getFinalUrl, getLanguage } from "./utils.js";
8
+
} from './moderation.js';
9
+
import type { Post } from './types.js';
10
+
import { getFinalUrl, getLanguage } from './utils.js';
11
11
12
12
export const checkPosts = async (post: Post[]) => {
13
13
// Get a list of labels
···
68
68
// Check if post is whitelisted
69
69
if (checkPost?.whitelist) {
70
70
if (checkPost?.whitelist.test(post[0].text)) {
71
-
logger.info(`[CHECKPOSTS]: Whitelisted phrase found"`);
71
+
logger.info('[CHECKPOSTS]: Whitelisted phrase found');
72
72
return;
73
73
}
74
74
}
+7
-7
src/checkProfiles.ts
···
1
-
import { login } from "./agent.js";
2
-
import { langs, PROFILE_CHECKS } from "./constants.js";
3
-
import logger from "./logger.js";
1
+
import { login } from './agent.js';
2
+
import { langs, PROFILE_CHECKS } from './constants.js';
3
+
import logger from './logger.js';
4
4
import {
5
5
createAccountReport,
6
6
createAccountLabel,
7
7
createAccountComment,
8
-
} from "./moderation.js";
9
-
import { getLanguage } from "./utils.js";
8
+
} from './moderation.js';
9
+
import { getLanguage } from './utils.js';
10
10
11
11
export const checkDescription = async (
12
12
did: string,
···
47
47
// Check if description is whitelisted
48
48
if (checkProfiles!.whitelist) {
49
49
if (checkProfiles!.whitelist.test(description)) {
50
-
logger.info(`[CHECKDESCRIPTION]: Whitelisted phrase found.`);
50
+
logger.info('[CHECKDESCRIPTION]: Whitelisted phrase found.');
51
51
return;
52
52
}
53
53
}
···
128
128
// Check if displayName is whitelisted
129
129
if (checkProfiles!.whitelist) {
130
130
if (checkProfiles!.whitelist.test(displayName)) {
131
-
logger.info(`[CHECKDISPLAYNAME]: Whitelisted phrase found.`);
131
+
logger.info('[CHECKDISPLAYNAME]: Whitelisted phrase found.');
132
132
return;
133
133
}
134
134
}
+4
-4
src/checkStarterPack.ts
···
1
-
import { PROFILE_CHECKS, STARTERPACK_CHECKS } from "./constants.js";
2
-
import logger from "./logger.js";
1
+
import { PROFILE_CHECKS, STARTERPACK_CHECKS } from './constants.js';
2
+
import logger from './logger.js';
3
3
import {
4
4
createAccountLabel,
5
5
createAccountReport,
6
6
createPostLabel,
7
-
} from "./moderation.js";
7
+
} from './moderation.js';
8
8
9
9
export const checkStarterPack = async (
10
10
did: string,
···
26
26
// Check if DID is whitelisted
27
27
if (checkProfiles?.ignoredDIDs) {
28
28
if (checkProfiles.ignoredDIDs.includes(did)) {
29
-
return logger.info(`Whitelisted DID: ${did}`);
29
+
logger.info(`Whitelisted DID: ${did}`); return;
30
30
}
31
31
}
32
32
+13
-13
src/config.ts
···
1
-
import "dotenv/config";
1
+
import 'dotenv/config';
2
2
3
-
export const MOD_DID = process.env.DID ?? "";
4
-
export const OZONE_URL = process.env.OZONE_URL ?? "";
5
-
export const OZONE_PDS = process.env.OZONE_PDS ?? "";
6
-
export const BSKY_HANDLE = process.env.BSKY_HANDLE ?? "";
7
-
export const BSKY_PASSWORD = process.env.BSKY_PASSWORD ?? "";
8
-
export const HOST = process.env.HOST ?? "127.0.0.1";
3
+
export const MOD_DID = process.env.DID ?? '';
4
+
export const OZONE_URL = process.env.OZONE_URL ?? '';
5
+
export const OZONE_PDS = process.env.OZONE_PDS ?? '';
6
+
export const BSKY_HANDLE = process.env.BSKY_HANDLE ?? '';
7
+
export const BSKY_PASSWORD = process.env.BSKY_PASSWORD ?? '';
8
+
export const HOST = process.env.HOST ?? '127.0.0.1';
9
9
export const PORT = process.env.PORT ? Number(process.env.PORT) : 4100;
10
10
export const METRICS_PORT = process.env.METRICS_PORT
11
11
? Number(process.env.METRICS_PORT)
12
12
: 4101; // Left this intact from the code I adapted this from
13
13
export const FIREHOSE_URL =
14
-
process.env.FIREHOSE_URL ?? "wss://jetstream1.us-east.bsky.network/subscribe";
14
+
process.env.FIREHOSE_URL ?? 'wss://jetstream1.us-east.bsky.network/subscribe';
15
15
export const WANTED_COLLECTION = [
16
-
"app.bsky.feed.post",
17
-
"app.bsky.actor.defs",
18
-
"app.bsky.actor.profile",
16
+
'app.bsky.feed.post',
17
+
'app.bsky.actor.defs',
18
+
'app.bsky.actor.profile',
19
19
];
20
20
export const CURSOR_UPDATE_INTERVAL = process.env.CURSOR_UPDATE_INTERVAL
21
21
? Number(process.env.CURSOR_UPDATE_INTERVAL)
22
22
: 60000;
23
-
export const LABEL_LIMIT = process.env.LABEL_LIMIT;
24
-
export const LABEL_LIMIT_WAIT = process.env.LABEL_LIMIT_WAIT;
23
+
export const { LABEL_LIMIT } = process.env;
24
+
export const { LABEL_LIMIT_WAIT } = process.env;
+3
-2
src/developing_checks.md
···
1
1
# How to build checks for skywatch-automod
2
2
3
3
## Introduction
4
+
4
5
Constants.ts defines three types of checks: `HANDLE_CHECKS`, `POST_CHECKS`, and `PROFILE_CHECKS`.
5
6
6
7
For each check, users need to define a set of regular expressions that will be used to match against the content of the post, handle, or profile. A maximal example of a check is as follows:
···
19
20
toLabel: true, // Should the handle in question be labeled if check evaluates to true.
20
21
check: new RegExp("example", "i"), // Regular expression to match against the content
21
22
whitelist: new RegExp("example.com", "i"), // Optional, regular expression to whitelist content
22
-
ignoredDIDs: ["did:plc:example"] // Optional, array of DIDs to ignore if they match the check. Useful for folks who reclaim words or accounts which may be false positives.
23
-
}
23
+
ignoredDIDs: ["did:plc:example"], // Optional, array of DIDs to ignore if they match the check. Useful for folks who reclaim words or accounts which may be false positives.
24
+
},
24
25
];
25
26
```
26
27
+1
-1
src/limits.ts
+21
-21
src/lists.ts
···
1
-
import { List } from "./types.js";
1
+
import type { List } from './types.js';
2
2
3
3
export const LISTS: List[] = [
4
4
{
5
-
label: "blue-heart-emoji",
6
-
rkey: "3lfbtgosyyi22",
5
+
label: 'blue-heart-emoji',
6
+
rkey: '3lfbtgosyyi22',
7
7
},
8
8
{
9
-
label: "troll",
10
-
rkey: "3lbckxhgu3r2v",
9
+
label: 'troll',
10
+
rkey: '3lbckxhgu3r2v',
11
11
},
12
12
{
13
-
label: "maga-trump",
14
-
rkey: "3l53cjwlt4o2s",
13
+
label: 'maga-trump',
14
+
rkey: '3l53cjwlt4o2s',
15
15
},
16
16
{
17
-
label: "elon-musk",
18
-
rkey: "3l72tte74wa2m",
17
+
label: 'elon-musk',
18
+
rkey: '3l72tte74wa2m',
19
19
},
20
20
{
21
-
label: "rmve-imve",
22
-
rkey: "3l6tfurf7li27",
21
+
label: 'rmve-imve',
22
+
rkey: '3l6tfurf7li27',
23
23
},
24
24
{
25
-
label: "nazi-symbolism",
26
-
rkey: "3l6vdudxgeb2z",
25
+
label: 'nazi-symbolism',
26
+
rkey: '3l6vdudxgeb2z',
27
27
},
28
28
{
29
-
label: "hammer-sickle",
30
-
rkey: "3l4ue6w2aur2v",
29
+
label: 'hammer-sickle',
30
+
rkey: '3l4ue6w2aur2v',
31
31
},
32
32
{
33
-
label: "inverted-red-triangle",
34
-
rkey: "3l4ueabtpec2a",
33
+
label: 'inverted-red-triangle',
34
+
rkey: '3l4ueabtpec2a',
35
35
},
36
36
{
37
-
label: "automated-reply-guy",
38
-
rkey: "3lch7qbvzpx23",
37
+
label: 'automated-reply-guy',
38
+
rkey: '3lch7qbvzpx23',
39
39
},
40
40
{
41
-
label: "terf-gc",
42
-
rkey: "3lcqjqjdejs2x",
41
+
label: 'terf-gc',
42
+
rkey: '3lcqjqjdejs2x',
43
43
},
44
44
];
+10
-10
src/logger.ts
···
1
-
import { pino } from "pino";
1
+
import { pino } from 'pino';
2
2
3
3
const logger = pino({
4
-
level: process.env.LOG_LEVEL ?? "info",
4
+
level: process.env.LOG_LEVEL ?? 'info',
5
5
transport:
6
-
process.env.NODE_ENV !== "production"
6
+
process.env.NODE_ENV !== 'production'
7
7
? {
8
-
target: "pino-pretty",
9
-
options: {
10
-
colorize: true,
11
-
translateTime: "SYS:standard",
12
-
ignore: "pid,hostname",
13
-
},
14
-
}
8
+
target: 'pino-pretty',
9
+
options: {
10
+
colorize: true,
11
+
translateTime: 'SYS:standard',
12
+
ignore: 'pid,hostname',
13
+
},
14
+
}
15
15
: undefined,
16
16
timestamp: pino.stdTimeFunctions.isoTime,
17
17
});
+59
-55
src/main.ts
···
1
-
import {
1
+
import fs from 'node:fs';
2
+
3
+
import type {
2
4
CommitCreateEvent,
3
-
CommitUpdate,
4
5
CommitUpdateEvent,
5
-
IdentityEvent,
6
+
IdentityEvent } from '@skyware/jetstream';
7
+
import {
8
+
CommitUpdate,
6
9
Jetstream,
7
-
} from "@skyware/jetstream";
8
-
import fs from "node:fs";
10
+
} from '@skyware/jetstream';
11
+
9
12
13
+
import { checkHandle } from './checkHandles.js';
14
+
import { checkPosts } from './checkPosts.js';
15
+
import { checkDescription, checkDisplayName } from './checkProfiles.js';
16
+
import { checkStarterPack, checkNewStarterPack } from './checkStarterPack.js';
10
17
import {
11
18
CURSOR_UPDATE_INTERVAL,
12
19
FIREHOSE_URL,
13
20
METRICS_PORT,
14
21
WANTED_COLLECTION,
15
-
} from "./config.js";
16
-
import logger from "./logger.js";
17
-
import { startMetricsServer } from "./metrics.js";
18
-
import { Post, LinkFeature, Handle } from "./types.js";
19
-
import { checkPosts } from "./checkPosts.js";
20
-
import { checkHandle } from "./checkHandles.js";
21
-
import { checkStarterPack, checkNewStarterPack } from "./checkStarterPack.js";
22
-
import { checkDescription, checkDisplayName } from "./checkProfiles.js";
22
+
} from './config.js';
23
+
import logger from './logger.js';
24
+
import { startMetricsServer } from './metrics.js';
25
+
import type { Post, LinkFeature } from './types.js';
26
+
import { Handle } from './types.js';
23
27
24
28
let cursor = 0;
25
29
let cursorUpdateInterval: NodeJS.Timeout;
···
29
33
}
30
34
31
35
try {
32
-
logger.info("Trying to read cursor from cursor.txt...");
33
-
cursor = Number(fs.readFileSync("cursor.txt", "utf8"));
36
+
logger.info('Trying to read cursor from cursor.txt...');
37
+
cursor = Number(fs.readFileSync('cursor.txt', 'utf8'));
34
38
logger.info(`Cursor found: ${cursor} (${epochUsToDateTime(cursor)})`);
35
39
} catch (error) {
36
-
if (error instanceof Error && "code" in error && error.code === "ENOENT") {
40
+
if (error instanceof Error && 'code' in error && error.code === 'ENOENT') {
37
41
cursor = Math.floor(Date.now() * 1000);
38
42
logger.info(
39
43
`Cursor not found in cursor.txt, setting cursor to: ${cursor} (${epochUsToDateTime(cursor)})`,
40
44
);
41
-
fs.writeFileSync("cursor.txt", cursor.toString(), "utf8");
45
+
fs.writeFileSync('cursor.txt', cursor.toString(), 'utf8');
42
46
} else {
43
47
logger.error(error);
44
48
process.exit(1);
···
48
52
const jetstream = new Jetstream({
49
53
wantedCollections: WANTED_COLLECTION,
50
54
endpoint: FIREHOSE_URL,
51
-
cursor: cursor,
55
+
cursor,
52
56
});
53
57
54
-
jetstream.on("open", () => {
58
+
jetstream.on('open', () => {
55
59
if (jetstream.cursor) {
56
60
logger.info(
57
61
`Connected to Jetstream at ${FIREHOSE_URL} with cursor ${jetstream.cursor} (${epochUsToDateTime(jetstream.cursor)})`,
···
66
70
logger.info(
67
71
`Cursor updated to: ${jetstream.cursor} (${epochUsToDateTime(jetstream.cursor)})`,
68
72
);
69
-
fs.writeFile("cursor.txt", jetstream.cursor.toString(), (err) => {
73
+
fs.writeFile('cursor.txt', jetstream.cursor.toString(), (err) => {
70
74
if (err) logger.error(err);
71
75
});
72
76
}
73
77
}, CURSOR_UPDATE_INTERVAL);
74
78
});
75
79
76
-
jetstream.on("close", () => {
80
+
jetstream.on('close', () => {
77
81
clearInterval(cursorUpdateInterval);
78
-
logger.info("Jetstream connection closed.");
82
+
logger.info('Jetstream connection closed.');
79
83
});
80
84
81
-
jetstream.on("error", (error) => {
85
+
jetstream.on('error', (error) => {
82
86
logger.error(`Jetstream error: ${error.message}`);
83
87
});
84
88
85
89
// Check for post updates
86
90
87
91
jetstream.onCreate(
88
-
"app.bsky.feed.post",
89
-
(event: CommitCreateEvent<"app.bsky.feed.post">) => {
92
+
'app.bsky.feed.post',
93
+
(event: CommitCreateEvent<'app.bsky.feed.post'>) => {
90
94
const atURI = `at://${event.did}/app.bsky.feed.post/${event.commit.rkey}`;
91
-
const hasFacets = event.commit.record.hasOwnProperty("facets");
92
-
const hasText = event.commit.record.hasOwnProperty("text");
95
+
const hasFacets = event.commit.record.hasOwnProperty('facets');
96
+
const hasText = event.commit.record.hasOwnProperty('text');
93
97
94
98
const tasks: Promise<void>[] = [];
95
99
···
97
101
if (hasFacets) {
98
102
const hasLinkType = event.commit.record.facets!.some((facet) =>
99
103
facet.features.some(
100
-
(feature) => feature.$type === "app.bsky.richtext.facet#link",
104
+
(feature) => feature.$type === 'app.bsky.richtext.facet#link',
101
105
),
102
106
);
103
107
104
108
if (hasLinkType) {
105
109
const urls = event.commit.record
106
110
.facets!.flatMap((facet) =>
107
-
facet.features.filter(
108
-
(feature) => feature.$type === "app.bsky.richtext.facet#link",
109
-
),
110
-
)
111
+
facet.features.filter(
112
+
(feature) => feature.$type === 'app.bsky.richtext.facet#link',
113
+
),
114
+
)
111
115
.map((feature: LinkFeature) => feature.uri);
112
116
113
117
urls.forEach((url) => {
···
116
120
did: event.did,
117
121
time: event.time_us,
118
122
rkey: event.commit.rkey,
119
-
atURI: atURI,
123
+
atURI,
120
124
text: url,
121
125
cid: event.commit.cid,
122
126
},
···
130
134
did: event.did,
131
135
time: event.time_us,
132
136
rkey: event.commit.rkey,
133
-
atURI: atURI,
137
+
atURI,
134
138
text: event.commit.record.text,
135
139
cid: event.commit.cid,
136
140
},
···
142
146
143
147
// Check for profile updates
144
148
jetstream.onUpdate(
145
-
"app.bsky.actor.profile",
146
-
async (event: CommitUpdateEvent<"app.bsky.actor.profile">) => {
149
+
'app.bsky.actor.profile',
150
+
async (event: CommitUpdateEvent<'app.bsky.actor.profile'>) => {
147
151
try {
148
152
if (event.commit.record.displayName || event.commit.record.description) {
149
153
checkDescription(
150
154
event.did,
151
155
event.time_us,
152
-
event.commit.record.displayName as string,
153
-
event.commit.record.description as string,
156
+
event.commit.record.displayName!,
157
+
event.commit.record.description!,
154
158
);
155
159
checkDisplayName(
156
160
event.did,
157
161
event.time_us,
158
-
event.commit.record.displayName as string,
159
-
event.commit.record.description as string,
162
+
event.commit.record.displayName!,
163
+
event.commit.record.description!,
160
164
);
161
165
}
162
166
···
176
180
// Check for profile updates
177
181
178
182
jetstream.onCreate(
179
-
"app.bsky.actor.profile",
180
-
async (event: CommitCreateEvent<"app.bsky.actor.profile">) => {
183
+
'app.bsky.actor.profile',
184
+
async (event: CommitCreateEvent<'app.bsky.actor.profile'>) => {
181
185
try {
182
186
if (event.commit.record.displayName || event.commit.record.description) {
183
187
checkDescription(
184
188
event.did,
185
189
event.time_us,
186
-
event.commit.record.displayName as string,
187
-
event.commit.record.description as string,
190
+
event.commit.record.displayName!,
191
+
event.commit.record.description!,
188
192
);
189
193
checkDisplayName(
190
194
event.did,
191
195
event.time_us,
192
-
event.commit.record.displayName as string,
193
-
event.commit.record.description as string,
196
+
event.commit.record.displayName!,
197
+
event.commit.record.description!,
194
198
);
195
199
196
200
if (event.commit.record.joinedViaStarterPack) {
···
210
214
);
211
215
212
216
jetstream.onCreate(
213
-
"app.bsky.graph.starterpack",
214
-
async (event: CommitCreateEvent<"app.bsky.graph.starterpack">) => {
217
+
'app.bsky.graph.starterpack',
218
+
async (event: CommitCreateEvent<'app.bsky.graph.starterpack'>) => {
215
219
try {
216
220
const atURI = `at://${event.did}/app.bsky.feed.post/${event.commit.rkey}`;
217
221
···
230
234
);
231
235
232
236
jetstream.onUpdate(
233
-
"app.bsky.graph.starterpack",
234
-
async (event: CommitUpdateEvent<"app.bsky.graph.starterpack">) => {
237
+
'app.bsky.graph.starterpack',
238
+
async (event: CommitUpdateEvent<'app.bsky.graph.starterpack'>) => {
235
239
try {
236
240
const atURI = `at://${event.did}/app.bsky.feed.post/${event.commit.rkey}`;
237
241
···
250
254
);
251
255
252
256
// Check for handle updates
253
-
jetstream.on("identity", async (event: IdentityEvent) => {
257
+
jetstream.on('identity', async (event: IdentityEvent) => {
254
258
if (event.identity.handle) {
255
259
checkHandle(event.identity.did, event.identity.handle, event.time_us);
256
260
}
···
270
274
271
275
function shutdown() {
272
276
try {
273
-
logger.info("Shutting down gracefully...");
274
-
fs.writeFileSync("cursor.txt", jetstream.cursor!.toString(), "utf8");
277
+
logger.info('Shutting down gracefully...');
278
+
fs.writeFileSync('cursor.txt', jetstream.cursor!.toString(), 'utf8');
275
279
jetstream.close();
276
280
metricsServer.close();
277
281
} catch (error) {
···
280
284
}
281
285
}
282
286
283
-
process.on("SIGINT", shutdown);
284
-
process.on("SIGTERM", shutdown);
287
+
process.on('SIGINT', shutdown);
288
+
process.on('SIGTERM', shutdown);
+6
-6
src/metrics.ts
···
1
-
import express from "express";
2
-
import { Registry, collectDefaultMetrics } from "prom-client";
1
+
import express from 'express';
2
+
import { Registry, collectDefaultMetrics } from 'prom-client';
3
3
4
-
import logger from "./logger.js";
4
+
import logger from './logger.js';
5
5
6
6
const register = new Registry();
7
7
collectDefaultMetrics({ register });
8
8
9
9
const app = express();
10
10
11
-
app.get("/metrics", (req, res) => {
11
+
app.get('/metrics', (req, res) => {
12
12
register
13
13
.metrics()
14
14
.then((metrics) => {
15
-
res.set("Content-Type", register.contentType);
15
+
res.set('Content-Type', register.contentType);
16
16
res.send(metrics);
17
17
})
18
18
.catch((ex: unknown) => {
···
21
21
});
22
22
});
23
23
24
-
export const startMetricsServer = (port: number, host = "127.0.0.1") => {
24
+
export const startMetricsServer = (port: number, host = '127.0.0.1') => {
25
25
return app.listen(port, host, () => {
26
26
logger.info(`Metrics server is listening on ${host}:${port}`);
27
27
});
+52
-52
src/moderation.ts
···
1
-
import { agent, isLoggedIn } from "./agent.js";
2
-
import { MOD_DID } from "./config.js";
3
-
import { limit } from "./limits.js";
4
-
import logger from "./logger.js";
5
-
import { LISTS } from "./lists.js";
1
+
import { agent, isLoggedIn } from './agent.js';
2
+
import { MOD_DID } from './config.js';
3
+
import { limit } from './limits.js';
4
+
import { LISTS } from './lists.js';
5
+
import logger from './logger.js';
6
6
7
7
export const createPostLabel = async (
8
8
uri: string,
···
16
16
return agent.tools.ozone.moderation.emitEvent(
17
17
{
18
18
event: {
19
-
$type: "tools.ozone.moderation.defs#modEventLabel",
20
-
comment: comment,
19
+
$type: 'tools.ozone.moderation.defs#modEventLabel',
20
+
comment,
21
21
createLabelVals: [label],
22
22
negateLabelVals: [],
23
23
},
24
24
// specify the labeled post by strongRef
25
25
subject: {
26
-
$type: "com.atproto.repo.strongRef",
27
-
uri: uri,
28
-
cid: cid,
26
+
$type: 'com.atproto.repo.strongRef',
27
+
uri,
28
+
cid,
29
29
},
30
30
// put in the rest of the metadata
31
31
createdBy: `${agent.did}`,
32
32
createdAt: new Date().toISOString(),
33
33
},
34
34
{
35
-
encoding: "application/json",
35
+
encoding: 'application/json',
36
36
headers: {
37
-
"atproto-proxy": `${MOD_DID!}#atproto_labeler`,
38
-
"atproto-accept-labelers":
39
-
"did:plc:ar7c4by46qjdydhdevvrndac;redact",
37
+
'atproto-proxy': `${MOD_DID}#atproto_labeler`,
38
+
'atproto-accept-labelers':
39
+
'did:plc:ar7c4by46qjdydhdevvrndac;redact',
40
40
},
41
41
},
42
42
);
···
57
57
await agent.tools.ozone.moderation.emitEvent(
58
58
{
59
59
event: {
60
-
$type: "tools.ozone.moderation.defs#modEventLabel",
61
-
comment: comment,
60
+
$type: 'tools.ozone.moderation.defs#modEventLabel',
61
+
comment,
62
62
createLabelVals: [label],
63
63
negateLabelVals: [],
64
64
},
65
65
// specify the labeled post by strongRef
66
66
subject: {
67
-
$type: "com.atproto.admin.defs#repoRef",
68
-
did: did,
67
+
$type: 'com.atproto.admin.defs#repoRef',
68
+
did,
69
69
},
70
70
// put in the rest of the metadata
71
71
createdBy: `${agent.did}`,
72
72
createdAt: new Date().toISOString(),
73
73
},
74
74
{
75
-
encoding: "application/json",
75
+
encoding: 'application/json',
76
76
headers: {
77
-
"atproto-proxy": `${MOD_DID!}#atproto_labeler`,
78
-
"atproto-accept-labelers":
79
-
"did:plc:ar7c4by46qjdydhdevvrndac;redact",
77
+
'atproto-proxy': `${MOD_DID}#atproto_labeler`,
78
+
'atproto-accept-labelers':
79
+
'did:plc:ar7c4by46qjdydhdevvrndac;redact',
80
80
},
81
81
},
82
82
);
···
97
97
return agent.tools.ozone.moderation.emitEvent(
98
98
{
99
99
event: {
100
-
$type: "tools.ozone.moderation.defs#modEventReport",
101
-
comment: comment,
102
-
reportType: "com.atproto.moderation.defs#reasonOther",
100
+
$type: 'tools.ozone.moderation.defs#modEventReport',
101
+
comment,
102
+
reportType: 'com.atproto.moderation.defs#reasonOther',
103
103
},
104
104
// specify the labeled post by strongRef
105
105
subject: {
106
-
$type: "com.atproto.repo.strongRef",
107
-
uri: uri,
108
-
cid: cid,
106
+
$type: 'com.atproto.repo.strongRef',
107
+
uri,
108
+
cid,
109
109
},
110
110
// put in the rest of the metadata
111
111
createdBy: `${agent.did}`,
112
112
createdAt: new Date().toISOString(),
113
113
},
114
114
{
115
-
encoding: "application/json",
115
+
encoding: 'application/json',
116
116
headers: {
117
-
"atproto-proxy": `${MOD_DID!}#atproto_labeler`,
118
-
"atproto-accept-labelers":
119
-
"did:plc:ar7c4by46qjdydhdevvrndac;redact",
117
+
'atproto-proxy': `${MOD_DID}#atproto_labeler`,
118
+
'atproto-accept-labelers':
119
+
'did:plc:ar7c4by46qjdydhdevvrndac;redact',
120
120
},
121
121
},
122
122
);
···
133
133
await agent.tools.ozone.moderation.emitEvent(
134
134
{
135
135
event: {
136
-
$type: "tools.ozone.moderation.defs#modEventComment",
137
-
comment: comment,
136
+
$type: 'tools.ozone.moderation.defs#modEventComment',
137
+
comment,
138
138
},
139
139
// specify the labeled post by strongRef
140
140
subject: {
141
-
$type: "com.atproto.admin.defs#repoRef",
142
-
did: did,
141
+
$type: 'com.atproto.admin.defs#repoRef',
142
+
did,
143
143
},
144
144
// put in the rest of the metadata
145
145
createdBy: `${agent.did}`,
146
146
createdAt: new Date().toISOString(),
147
147
},
148
148
{
149
-
encoding: "application/json",
149
+
encoding: 'application/json',
150
150
headers: {
151
-
"atproto-proxy": `${MOD_DID!}#atproto_labeler`,
152
-
"atproto-accept-labelers":
153
-
"did:plc:ar7c4by46qjdydhdevvrndac;redact",
151
+
'atproto-proxy': `${MOD_DID}#atproto_labeler`,
152
+
'atproto-accept-labelers':
153
+
'did:plc:ar7c4by46qjdydhdevvrndac;redact',
154
154
},
155
155
},
156
156
);
···
167
167
await agent.tools.ozone.moderation.emitEvent(
168
168
{
169
169
event: {
170
-
$type: "tools.ozone.moderation.defs#modEventReport",
171
-
comment: comment,
172
-
reportType: "com.atproto.moderation.defs#reasonOther",
170
+
$type: 'tools.ozone.moderation.defs#modEventReport',
171
+
comment,
172
+
reportType: 'com.atproto.moderation.defs#reasonOther',
173
173
},
174
174
// specify the labeled post by strongRef
175
175
subject: {
176
-
$type: "com.atproto.admin.defs#repoRef",
177
-
did: did,
176
+
$type: 'com.atproto.admin.defs#repoRef',
177
+
did,
178
178
},
179
179
// put in the rest of the metadata
180
180
createdBy: `${agent.did}`,
181
181
createdAt: new Date().toISOString(),
182
182
},
183
183
{
184
-
encoding: "application/json",
184
+
encoding: 'application/json',
185
185
headers: {
186
-
"atproto-proxy": `${MOD_DID!}#atproto_labeler`,
187
-
"atproto-accept-labelers":
188
-
"did:plc:ar7c4by46qjdydhdevvrndac;redact",
186
+
'atproto-proxy': `${MOD_DID}#atproto_labeler`,
187
+
'atproto-accept-labelers':
188
+
'did:plc:ar7c4by46qjdydhdevvrndac;redact',
189
189
},
190
190
},
191
191
);
···
207
207
}
208
208
logger.info(`New label added to list: ${newList.label}`);
209
209
210
-
const listUri = `at://${MOD_DID!}/app.bsky.graph.list/${newList.rkey}`;
210
+
const listUri = `at://${MOD_DID}/app.bsky.graph.list/${newList.rkey}`;
211
211
212
212
await limit(async () => {
213
213
try {
214
214
await agent.com.atproto.repo.createRecord({
215
-
collection: "app.bsky.graph.listitem",
216
-
repo: `${MOD_DID!}`,
215
+
collection: 'app.bsky.graph.listitem',
216
+
repo: MOD_DID,
217
217
record: {
218
218
subject: did,
219
219
list: listUri,
+9
-8
src/monitor.ts
···
1
-
import { describe } from "node:test";
2
-
import { PROFILE_CHECKS } from "./constants.js";
3
-
import logger from "./logger.js";
4
-
import { createAccountReport, createAccountLabel } from "./moderation.js";
1
+
import { describe } from 'node:test';
2
+
3
+
import { PROFILE_CHECKS } from './constants.js';
4
+
import logger from './logger.js';
5
+
import { createAccountReport, createAccountLabel } from './moderation.js';
5
6
6
7
export const monitorDescription = async (
7
8
did: string,
···
24
25
// Check if DID is whitelisted
25
26
if (checkProfiles?.ignoredDIDs) {
26
27
if (checkProfiles.ignoredDIDs.includes(did)) {
27
-
return logger.info(`Whitelisted DID: ${did}`);
28
+
logger.info(`Whitelisted DID: ${did}`); return;
28
29
}
29
30
}
30
31
···
33
34
if (checkProfiles!.check.test(description)) {
34
35
if (checkProfiles!.whitelist) {
35
36
if (checkProfiles!.whitelist.test(description)) {
36
-
logger.info(`Whitelisted phrase found.`);
37
+
logger.info('Whitelisted phrase found.');
37
38
return;
38
39
}
39
40
} else {
···
80
81
// Check if DID is whitelisted
81
82
if (checkProfiles?.ignoredDIDs) {
82
83
if (checkProfiles.ignoredDIDs.includes(did)) {
83
-
return logger.info(`Whitelisted DID: ${did}`);
84
+
logger.info(`Whitelisted DID: ${did}`); return;
84
85
}
85
86
}
86
87
···
89
90
if (checkProfiles!.check.test(displayName)) {
90
91
if (checkProfiles!.whitelist) {
91
92
if (checkProfiles!.whitelist.test(displayName)) {
92
-
logger.info(`Whitelisted phrase found.`);
93
+
logger.info('Whitelisted phrase found.');
93
94
return;
94
95
}
95
96
} else {
+1
-1
src/types.ts
+14
-14
src/utils.ts
···
1
-
import logger from "./logger.js";
1
+
import logger from './logger.js';
2
2
3
3
/* Normalize the Unicode characters: this doesn't consistently work yet, there is something about certain bluesky strings that causes it to fail. */
4
4
export function normalizeUnicode(text: string): string {
5
5
// First decompose the characters (NFD)
6
-
const decomposed = text.normalize("NFD");
6
+
const decomposed = text.normalize('NFD');
7
7
8
8
// Remove diacritics and combining marks
9
-
const withoutDiacritics = decomposed.replace(/[\u0300-\u036f]/g, "");
9
+
const withoutDiacritics = decomposed.replace(/[\u0300-\u036f]/g, '');
10
10
11
11
// Remove mathematical alphanumeric symbols
12
12
const withoutMath = withoutDiacritics.replace(
···
31
31
);
32
32
33
33
// Final NFKC normalization to handle any remaining special characters
34
-
return withoutMath.normalize("NFKC");
34
+
return withoutMath.normalize('NFKC');
35
35
}
36
36
37
37
export async function getFinalUrl(url: string): Promise<string> {
38
38
const controller = new AbortController();
39
-
const timeoutId = setTimeout(() => controller.abort(), 10000); // 10-second timeout
39
+
const timeoutId = setTimeout(() => { controller.abort(); }, 10000); // 10-second timeout
40
40
41
41
try {
42
42
const response = await fetch(url, {
43
-
method: "HEAD",
44
-
redirect: "follow", // This will follow redirects automatically
43
+
method: 'HEAD',
44
+
redirect: 'follow', // This will follow redirects automatically
45
45
signal: controller.signal, // Pass the abort signal to fetch
46
46
});
47
47
clearTimeout(timeoutId); // Clear the timeout if fetch completes
···
49
49
} catch (error) {
50
50
clearTimeout(timeoutId); // Clear the timeout if fetch fails
51
51
// Log the error with more specific information if it's a timeout
52
-
if (error instanceof Error && error.name === "AbortError") {
52
+
if (error instanceof Error && error.name === 'AbortError') {
53
53
logger.warn(`Timeout fetching URL: ${url}`, error);
54
54
} else {
55
55
logger.warn(`Error fetching URL: ${url}`, error);
···
59
59
}
60
60
61
61
export async function getLanguage(profile: string): Promise<string> {
62
-
if (typeof profile !== "string" || profile === null) {
62
+
if (typeof profile !== 'string' || profile === null) {
63
63
logger.warn(
64
-
"[GETLANGUAGE] getLanguage called with invalid profile data, defaulting to 'eng'.",
64
+
'[GETLANGUAGE] getLanguage called with invalid profile data, defaulting to \'eng\'.',
65
65
profile,
66
66
);
67
-
return "eng"; // Default or throw an error
67
+
return 'eng'; // Default or throw an error
68
68
}
69
69
70
70
const profileText = profile.trim();
71
71
72
72
if (profileText.length === 0) {
73
-
return "eng";
73
+
return 'eng';
74
74
}
75
75
76
-
const lande = (await import("lande")).default;
77
-
let langsProbabilityMap = lande(profileText);
76
+
const lande = (await import('lande')).default;
77
+
const langsProbabilityMap = lande(profileText);
78
78
79
79
// Sort by probability in descending order
80
80
langsProbabilityMap.sort(