This commit is contained in:
Florian Federspiel
2023-11-25 16:53:52 +01:00
commit 677030f712
685 changed files with 148719 additions and 0 deletions

64
test/imaps/node_modules/encoding-japanese/CHANGELOG.md generated vendored Normal file
View File

@@ -0,0 +1,64 @@
# Change Log
## [2.0.0](https://github.com/polygonplanet/encoding.js/compare/1.0.30...2.0.0) (2022-03-29)
### Added
* Add `Encoding.version` ([bd3d6ef](https://github.com/polygonplanet/encoding.js/commit/bd3d6ef511a17c2d9671453e6c93618dae7ae9db))
* Add `fallback` option to `Encoding.convert` ([#23](https://github.com/polygonplanet/encoding.js/pull/23)) ([5622bfa](https://github.com/polygonplanet/encoding.js/commit/5622bfa4b2ee3981d664315b743094fcfd4d01a0)) Thanks [@tohutohu](https://github.com/tohutohu)
### Fixed
* Fix deprecated Buffer constructor ([b8fda07](https://github.com/polygonplanet/encoding.js/commit/b8fda07f6957f9197210fcda196cb2d6cc28e7a1))
### Removed
* Drop `bower` support ([981ea39](https://github.com/polygonplanet/encoding.js/commit/981ea3947021faa87e12774e0786c6b13fe09124))
## [1.0.30](https://github.com/polygonplanet/encoding.js/compare/1.0.29...1.0.30) (2019-09-12)
### Added
* Add LICENSE ([0224ebb](https://github.com/polygonplanet/encoding.js/commit/0224ebb620ae4058064f80ec3ec5898181595abe))
## [1.0.29](https://github.com/polygonplanet/encoding.js/compare/1.0.28...1.0.29) (2018-05-11)
### Fixed
* Fix can't find module in using 'require' ([#8](https://github.com/polygonplanet/encoding.js/issues/8)) ([5cf89b8](https://github.com/polygonplanet/encoding.js/commit/5cf89b85758d2466fd52a9690eed27ebaeba1e5e))
## [1.0.28](https://github.com/polygonplanet/encoding.js/compare/1.0.26...1.0.28) (2018-02-01)
### Changed
* Drop `Gruntfile.js` and modularize the code base by `browserify`
## [1.0.26](https://github.com/polygonplanet/encoding.js/compare/1.0.25...1.0.26) (2017-08-21)
### Fixed
* Fix the grammar in README.md ([#4](https://github.com/polygonplanet/encoding.js/pull/4)) Thanks [@iku000888](https://github.com/iku000888)
## [1.0.25](https://github.com/polygonplanet/encoding.js/compare/1.0.24...1.0.25) (2016-11-03)
### Fixed
* Fix argument decision of the detect method ([#3](https://github.com/polygonplanet/encoding.js/pull/3)) Thanks [@spring-raining](https://github.com/spring-raining)
## [1.0.24](https://github.com/polygonplanet/encoding.js/compare/1.0.23...1.0.24) (2015-09-22)
### Added
* Add `base64Encode` and `base64Decode` ([729bb4f](https://github.com/polygonplanet/encoding.js/commit/729bb4fac63dbfbea8dedefe874270ae5d6c2e21))
## [1.0.23](https://github.com/polygonplanet/encoding.js/compare/1.0.21...1.0.23) (2015-04-06)
### Fixed
* Fix internal `isObject()` method for old IE browsers ([#2](https://github.com/polygonplanet/encoding.js/pull/2)) ([32f6c02](https://github.com/polygonplanet/encoding.js/commit/32f6c02e290a36deb158357ddd1ebe34601cb4ea)) Thanks [@dmitrygorbenko](https://github.com/dmitrygorbenko)
## [1.0.21](https://github.com/polygonplanet/encoding.js/compare/1.0.20...1.0.21) (2015-02-12)
### Added
* Add bower.json ([d33bb7c](https://github.com/polygonplanet/encoding.js/commit/d33bb7c225e8e5c53100b6776a8c1d63b7a807e6))

21
test/imaps/node_modules/encoding-japanese/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2012 polygonplanet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

542
test/imaps/node_modules/encoding-japanese/README.md generated vendored Normal file
View File

@@ -0,0 +1,542 @@
encoding.js
===========
[![NPM Version](https://img.shields.io/npm/v/encoding-japanese.svg)](https://www.npmjs.com/package/encoding-japanese)
[![Build Status](https://app.travis-ci.com/polygonplanet/encoding.js.svg?branch=master)](https://app.travis-ci.com/polygonplanet/encoding.js)
[![GitHub License](https://img.shields.io/github/license/polygonplanet/encoding.js.svg)](https://github.com/polygonplanet/encoding.js/blob/master/LICENSE)
Convert or detect character encoding in JavaScript.
[**README (Japanese)**](README_ja.md)
## Table of contents
- [Features](#features)
* [How to use character encoding in strings?](#how-to-use-character-encoding-in-strings)
- [Installation](#installation)
* [npm](#npm)
+ [TypeScript](#typescript)
* [browser (standalone)](#browser-standalone)
* [CDN](#cdn)
- [Supported encodings](#supported-encodings)
* [About `UNICODE`](#about-unicode)
- [Example usage](#example-usage)
- [Demo](#demo)
- [API](#api)
* [Detect character encoding (detect)](#detect-character-encoding-detect)
* [Convert character encoding (convert)](#convert-character-encoding-convert)
+ [Specify conversion options to the argument `to_encoding` as an object](#specify-conversion-options-to-the-argument-to_encoding-as-an-object)
+ [Specify the return type by the `type` option](#specify-the-return-type-by-the-type-option)
+ [Replace to HTML entity (Numeric character reference) when cannot be represented](#replace-to-html-entity-numeric-character-reference-when-cannot-be-represented)
+ [Specify BOM in UTF-16](#specify-bom-in-utf-16)
* [URL Encode/Decode](#url-encodedecode)
* [Base64 Encode/Decode](#base64-encodedecode)
* [Code array to string conversion (codeToString/stringToCode)](#code-array-to-string-conversion-codetostringstringtocode)
* [Japanese Zenkaku/Hankaku conversion](#japanese-zenkakuhankaku-conversion)
- [Other examples](#other-examples)
* [Example using the XMLHttpRequest and Typed arrays (Uint8Array)](#example-using-the-xmlhttprequest-and-typed-arrays-uint8array)
* [Convert encoding for file using the File APIs](#convert-encoding-for-file-using-the-file-apis)
- [Contributing](#contributing)
- [License](#license)
## Features
encoding.js is a JavaScript library for converting and detecting character encodings
that support Japanese character encodings such as `Shift_JIS`, `EUC-JP`, `JIS`, and `Unicode` such as `UTF-8` and `UTF-16`.
Since JavaScript string values are internally encoded as UTF-16 code units ([ref: ECMAScript® 2019 Language Specification - 6.1.4 The String Type](https://www.ecma-international.org/ecma-262/10.0/index.html#sec-ecmascript-language-types-string-type)),
they cannot properly handle other character encodings as they are, but encoding.js enables conversion by handling them as arrays instead of strings.
Each character encoding is handled as an array of numbers with character code values, for example `[130, 160]` ("あ" in UTF-8).
The array of character codes passed to each method of encoding.js can also be used with TypedArray such as `Uint8Array`, and `Buffer` in Node.js.
### How to use character encoding in strings?
Numeric arrays of character codes can be converted to strings with methods such as [`Encoding.codeToString`](#code-array-to-string-conversion-codetostringstringtocode),
but because of the above JavaScript specifications, some character encodings cannot be handled properly when converted to strings.
So if you want to use strings instead of arrays, convert it to percent-encoded strings like `'%82%A0'` by using [`Encoding.urlEncode`](#url-encodedecode) and [`Encoding.urlDecode`](#url-encodedecode) to passed to other resources.
Or, [`Encoding.base64Encode`](#base64-encodedecode) and [`Encoding.base64Decode`](#base64-encodedecode) can be passed as strings in the same way.
## Installation
### npm
encoding.js is published under the package name `encoding-japanese` on npm.
```bash
$ npm install --save encoding-japanese
```
#### using `import`
```javascript
import Encoding from 'encoding-japanese';
```
#### using `require`
```javascript
const Encoding = require('encoding-japanese');
```
#### TypeScript
TypeScript type definitions for encoding.js are available at [@types/encoding-japanese](https://www.npmjs.com/package/@types/encoding-japanese) (thanks [@rhysd](https://github.com/rhysd)).
```bash
$ npm install --save-dev @types/encoding-japanese
```
### browser (standalone)
Install from npm or download from the [release list](https://github.com/polygonplanet/encoding.js/tags) and use `encoding.js` or `encoding.min.js` in the package.
\*Please note that if you `git clone`, even the *master* branch may be under development.
```html
<script src="encoding.js"></script>
```
Or use the minified `encoding.min.js`
```html
<script src="encoding.min.js"></script>
```
When the script is loaded, the object `Encoding` is defined in the global scope (i.e. `window.Encoding`).
### CDN
You can use the encoding.js (package name: `encoding-japanese`) CDN on [cdnjs.com](https://cdnjs.com/libraries/encoding-japanese).
## Supported encodings
|Value in encoding.js|[`detect()`](#detect-character-encoding-detect)|[`convert()`](#convert-character-encoding-convert)|MIME Name (Note)|
|:------:|:----:|:-----:|:---|
|ASCII |✓ | |US-ASCII (Code point range: `0-127`)|
|BINARY |✓ | |(Binary strings. Code point range: `0-255`)|
|EUCJP |✓ |✓ |EUC-JP|
|JIS |✓ |✓ |ISO-2022-JP|
|SJIS |✓ |✓ |Shift_JIS|
|UTF8 |✓ |✓ |UTF-8|
|UTF16 |✓ |✓ |UTF-16|
|UTF16BE |✓ |✓ |UTF-16BE (big-endian)|
|UTF16LE |✓ |✓ |UTF-16LE (little-endian)|
|UTF32 |✓ | |UTF-32|
|UNICODE |✓ |✓ |(JavaScript's internal encoding. *See [About `UNICODE`](#about-unicode) below) |
### About `UNICODE`
In encoding.js, the internal character encoding that can be handled in JavaScript is defined as `UNICODE`.
As mentioned above ([Features](#features)), JavaScript strings are internally encoded in UTF-16 code units, and other character encodings cannot be handled properly.
Therefore, to convert to a character encoding properly represented in JavaScript, specify `UNICODE`.
(*Even if the HTML file encoding is UTF-8, specify `UNICODE` instead of `UTF8` when handling it in JavaScript.)
The value of each character code array returned from `Encoding.convert` is a number of 0-255 if you specify a character code other than `UNICODE` such as `UTF8` or `SJIS`,
or a number of `0-65535` (range of `String.prototype.charCodeAt()` values = Code Unit) if you specify `UNICODE`.
## Example usage
Convert character encoding from JavaScript string (`UNICODE`) to `SJIS`.
```javascript
const unicodeArray = Encoding.stringToCode('こんにちは'); // Convert string to code array
const sjisArray = Encoding.convert(unicodeArray, {
to: 'SJIS',
from: 'UNICODE'
});
console.log(sjisArray);
// [130, 177, 130, 241, 130, 201, 130, 191, 130, 205] ('こんにちは' array in SJIS)
```
Convert character encoding from `SJIS` to `UNICODE`.
```javascript
var sjisArray = [
130, 177, 130, 241, 130, 201, 130, 191, 130, 205
]; // 'こんにちは' array in SJIS
var unicodeArray = Encoding.convert(sjisArray, {
to: 'UNICODE',
from: 'SJIS'
});
var str = Encoding.codeToString(unicodeArray); // Convert code array to string
console.log(str); // 'こんにちは'
```
Detect character encoding.
```javascript
var data = [
227, 129, 147, 227, 130, 147, 227, 129, 171, 227, 129, 161, 227, 129, 175
]; // 'こんにちは' array in UTF-8
var detectedEncoding = Encoding.detect(data);
console.log('Character encoding is ' + detectedEncoding); // 'Character encoding is UTF8'
```
(Node.js) Example of reading a text file written in `SJIS`.
```javascript
const fs = require('fs');
const Encoding = require('encoding-japanese');
const sjisBuffer = fs.readFileSync('./sjis.txt');
const unicodeArray = Encoding.convert(sjisBuffer, {
to: 'UNICODE',
from: 'SJIS'
});
console.log(Encoding.codeToString(unicodeArray));
```
## Demo
* [Test for character encoding conversion (Demo)](http://polygonplanet.github.io/encoding.js/tests/encoding-test.html)
* [Detect and Convert encoding from file (Demo)](http://polygonplanet.github.io/encoding.js/tests/detect-file-encoding.html)
----
## API
* [detect](#detect-character-encoding-detect)
* [convert](#convert-character-encoding-convert)
* [urlEncode / urlDecode](#url-encodedecode)
* [base64Encode / base64Decode](#base64-encodedecode)
* [codeToString / stringToCode](#code-array-to-string-conversion-codetostringstringtocode)
* [Japanese Zenkaku / Hankaku conversion](#japanese-zenkakuhankaku-conversion)
### Detect character encoding (detect)
* {_string|boolean_} Encoding.**detect** ( data [, encodings ] )
Detect character encoding.
@param {_Array|TypedArray|string_} _data_ Target data
@param {_string|Array_} [_encodings_] (Optional) The encoding name that to specify the detection (value of [Supported encodings](#supported-encodings))
@return {_string|boolean_} Return the detected character encoding, or false.
The return value is one of the above "[Supported encodings](#supported-encodings)" or false if it cannot be detected.
```javascript
var sjisArray = [130, 168, 130, 205, 130, 230]; // 'おはよ' array in SJIS
var detectedEncoding = Encoding.detect(sjisArray);
console.log('Encoding is ' + detectedEncoding); // 'Encoding is SJIS'
```
Example of specifying the character encoding to be detected.
If the second argument `encodings` is specified, returns true when it is the specified character encoding, false otherwise.
```javascript
var sjisArray = [130, 168, 130, 205, 130, 230];
var isSJIS = Encoding.detect(sjisArray, 'SJIS');
if (isSJIS) {
console.log('Encoding is SJIS');
}
```
### Convert character encoding (convert)
* {_Array|TypedArray|string_} Encoding.**convert** ( data, to\_encoding [, from\_encoding ] )
Converts character encoding.
@param {_Array|TypedArray|Buffer|string_} _data_ The target data.
@param {_string|Object_} _to\_encoding_ The encoding name of conversion destination, or option to convert as an object.
@param {_string|Array_} [_from\_encoding_] (Optional) The encoding name of the source or 'AUTO'.
@return {_Array|TypedArray|string_} Return the converted array/string.
Example of converting a character code array to Shift_JIS from UTF-8.
```javascript
var utf8Array = [227, 129, 130]; // "あ" in UTF-8
var sjisArray = Encoding.convert(utf8Array, 'SJIS', 'UTF8');
console.log(sjisArray); // [130, 160] ("あ" in SJIS)
```
TypedArray such as `Uint8Array`, and `Buffer` of Node.js can be converted in the same usage.
```javascript
var utf8Array = new Uint8Array([227, 129, 130]);
Encoding.convert(utf8Array, 'SJIS', 'UTF8');
```
Converts character encoding by auto-detecting the encoding name of the source.
```javascript
// The character encoding is automatically detected when the from_encoding argument is omitted
var utf8Array = [227, 129, 130];
var sjisArray = Encoding.convert(utf8Array, 'SJIS');
// Or explicitly specify 'AUTO' to auto-detecting
sjisArray = Encoding.convert(utf8Array, 'SJIS', 'AUTO');
```
#### Specify conversion options to the argument `to_encoding` as an object
You can specify the second argument `to_encoding` as an object for improving readability.
```javascript
var sjisArray = Encoding.convert(utf8Array, {
to: 'SJIS', // to_encoding
from: 'UTF8' // from_encoding
});
```
#### Specify the return type by the `type` option
`convert` returns an array by default, but you can change the return type by specifying the `type` option.
Also, if the argument `data` is passed as a string and the `type` option is not specified, then `type: 'string'` is assumed (returns as a string).
```javascript
var sjisArray = [130, 168, 130, 205, 130, 230]; // 'おはよ' array in SJIS
var unicodeString = Encoding.convert(sjisArray, {
to: 'UNICODE',
from: 'SJIS',
type: 'string' // Specify 'string' to return as string
});
console.log(unicodeString); // 'おはよ'
```
The following `type` options are supported
* **string** : Return as a string
* **arraybuffer** : Return as an ArrayBuffer (`Uint16Array`)
* **array** : Return as an Array (*default*)
#### Replace to HTML entity (Numeric character reference) when cannot be represented
Characters that cannot be represented in the target character set are replaced with '?' (U+003F) by default but can be replaced with HTML entities by specifying the `fallback` option.
The `fallback` option supports the following values.
* **html-entity** : Replace to HTML entity (decimal HTML numeric character reference)
* **html-entity-hex** : Replace to HTML entity (hexadecimal HTML numeric character reference)
Example of specifying `{ fallback: 'html-entity' }` option
```javascript
var unicodeArray = Encoding.stringToCode('寿司🍣ビール🍺');
// No fallback specified
var sjisArray = Encoding.convert(unicodeArray, {
to: 'SJIS',
from: 'UNICODE'
});
console.log(sjisArray); // Converted to a code array of '寿司?ビール?'
// Specify `fallback: html-entity`
sjisArray = Encoding.convert(unicodeArray, {
to: 'SJIS',
from: 'UNICODE',
fallback: 'html-entity'
});
console.log(sjisArray); // Converted to a code array of '寿司&#127843;ビール&#127866;'
```
Example of specifying `{ fallback: 'html-entity-hex' }` option
```javascript
var unicodeArray = Encoding.stringToCode('ホッケの漢字は𩸽');
var sjisArray = Encoding.convert(unicodeArray, {
to: 'SJIS',
from: 'UNICODE',
fallback: 'html-entity-hex'
});
console.log(sjisArray); // Converted to a code array of 'ホッケの漢字は&#x29e3d;'
```
#### Specify BOM in UTF-16
You can add a BOM (byte order mark) by specifying the `bom` option when converting to `UTF16`.
The default is no BOM.
```javascript
var utf16Array = Encoding.convert(utf8Array, {
to: 'UTF16', // to_encoding
from: 'UTF8', // from_encoding
bom: true // Add BOM
});
```
`UTF16` byte order is big-endian by default.
If you want to convert as little-endian, specify the `{ bom: 'LE' }` option.
```javascript
var utf16leArray = Encoding.convert(utf8Array, {
to: 'UTF16', // to_encoding
from: 'UTF8', // from_encoding
bom: 'LE' // With BOM (little-endian)
});
```
If you do not need BOM, use `UTF16BE` or `UTF16LE`.
`UTF16BE` is big-endian, and `UTF16LE` is little-endian, and both have no BOM.
```javascript
var utf16beArray = Encoding.convert(utf8Array, {
to: 'UTF16BE',
from: 'UTF8'
});
```
### URL Encode/Decode
* {_string_} Encoding.**urlEncode** ( data )
URL(percent) encode.
@param {_Array_|_TypedArray_} _data_ Target data.
@return {_string_} Return the encoded string.
* {_Array_} Encoding.**urlDecode** ( string )
URL(percent) decode.
@param {_string_} _string_ Target data.
@return {_Array_} Return the decoded array.
```javascript
var sjisArray = [130, 177, 130, 241, 130, 201, 130, 191, 130, 205];
var encoded = Encoding.urlEncode(sjisArray);
console.log(encoded); // '%82%B1%82%F1%82%C9%82%BF%82%CD'
var decoded = Encoding.urlDecode(encoded);
console.log(decoded); // [130, 177, 130, 241, 130, 201, 130, 191, 130, 205]
```
### Base64 Encode/Decode
* {_string_} Encoding.**base64Encode** ( data )
Base64 encode.
@param {_Array_|_TypedArray_} _data_ Target data.
@return {_string_} Return the Base64 encoded string.
* {_Array_} Encoding.**base64Decode** ( string )
Base64 decode.
@param {_string_} _string_ Target data.
@return {_Array_} Return the Base64 decoded array.
```javascript
var sjisArray = [130, 177, 130, 241, 130, 201, 130, 191, 130, 205];
var encoded = Encoding.base64Encode(sjisArray);
console.log(encoded); // 'grGC8YLJgr+CzQ=='
var decoded = Encoding.base64Decode(encoded);
console.log(decoded); // [130, 177, 130, 241, 130, 201, 130, 191, 130, 205]
```
### Code array to string conversion (codeToString/stringToCode)
* {_string_} Encoding.**codeToString** ( {_Array_|_TypedArray_} data )
Joins a character code array to string.
* {_Array_} Encoding.**stringToCode** ( {_string_} string )
Splits string to an array of character codes.
### Japanese Zenkaku/Hankaku conversion
* {_Array|string_} Encoding.**toHankakuCase** ( {_Array|string_} data )
Convert the zenkaku (fullwidth) symbols and alphanumeric characters to the hankaku (halfwidth, ascii) symbols and alphanumeric characters.
* {_Array|string_} Encoding.**toZenkakuCase** ( {_Array|string_} data )
Convert to the zenkaku symbols and alphanumeric characters from the ascii symbols and alphanumeric characters.
* {_Array|string_} Encoding.**toHiraganaCase** ( {_Array|string_} data )
Convert to the zenkaku hiragana from the zenkaku katakana.
* {_Array|string_} Encoding.**toKatakanaCase** ( {_Array|string_} data )
Convert to the zenkaku katakana from the zenkaku hiragana.
* {_Array|string_} Encoding.**toHankanaCase** ( {_Array|string_} data )
Convert to the hankaku katakana from the zenkaku katakana.
* {_Array|string_} Encoding.**toZenkanaCase** ( {_Array|string_} data )
Convert to the zenkaku katakana from the hankaku katakana.
* {_Array|string_} Encoding.**toHankakuSpace** ({_Array|string_} data )
Convert the em space(U+3000) to the single space(U+0020).
* {_Array|string_} Encoding.**toZenkakuSpace** ( {_Array|string_} data )
Convert the single space(U+0020) to the em space(U+3000).
## Other examples
### Example using the XMLHttpRequest and Typed arrays (Uint8Array)
This sample reads the text file written in Shift_JIS as binary data,
and displays a string that is converted to Unicode by Encoding.convert.
```javascript
var req = new XMLHttpRequest();
req.open('GET', '/my-shift_jis.txt', true);
req.responseType = 'arraybuffer';
req.onload = function (event) {
var buffer = req.response;
if (buffer) {
// Shift_JIS Array
var sjisArray = new Uint8Array(buffer);
// Convert encoding to UNICODE (JavaScript Unicode Array).
var unicodeArray = Encoding.convert(sjisArray, {
to: 'UNICODE',
from: 'SJIS'
});
// Join to string.
var unicodeString = Encoding.codeToString(unicodeArray);
console.log(unicodeString);
}
};
req.send(null);
```
### Convert encoding for file using the File APIs
Reads file using the File APIs.
Detect file encoding and convert to Unicode, and display it.
```html
<input type="file" id="file">
<div id="encoding"></div>
<textarea id="result" rows="5" cols="80"></textarea>
<script>
function onFileSelect(event) {
var file = event.target.files[0];
var reader = new FileReader();
reader.onload = function(e) {
var codes = new Uint8Array(e.target.result);
var encoding = Encoding.detect(codes);
document.getElementById('encoding').textContent = encoding;
// Convert encoding to unicode
var unicodeString = Encoding.convert(codes, {
to: 'unicode',
from: encoding,
type: 'string'
});
document.getElementById('result').value = unicodeString;
};
reader.readAsArrayBuffer(file);
}
document.getElementById('file').addEventListener('change', onFileSelect, false);
</script>
```
[**Demo**](http://polygonplanet.github.io/encoding.js/tests/detect-file-encoding.html)
## Contributing
We welcome contributions from everyone.
For bug reports and feature requests, please [create an issue on GitHub](https://github.com/polygonplanet/encoding.js/issues).
### Pull requests
Please run `$ npm run test` before the pull request to confirm there are no errors.
We only accept requests without errors.
## License
MIT

6077
test/imaps/node_modules/encoding-japanese/encoding.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

70
test/imaps/node_modules/encoding-japanese/package.json generated vendored Normal file
View File

@@ -0,0 +1,70 @@
{
"name": "encoding-japanese",
"version": "2.0.0",
"description": "Convert or detect character encoding in JavaScript",
"main": "src/index.js",
"files": [
"encoding.js",
"encoding.min.js",
"encoding.min.js.map",
"src/*"
],
"scripts": {
"build": "npm run compile && npm run minify",
"compile": "browserify src/index.js -o encoding.js -s Encoding -p [ bannerify --file src/banner.js ] --no-bundle-external --bare",
"minify": "uglifyjs encoding.js -o encoding.min.js --source-map \"url='encoding.min.js.map'\" --comments -c -m -b ascii_only=true,beautify=false",
"test": "./node_modules/.bin/eslint . && npm run build && mocha tests/test",
"watch": "watchify src/index.js -o encoding.js -s Encoding -p [ bannerify --file src/banner.js ] --no-bundle-external --bare --poll=300 -v"
},
"engines": {
"node": ">=8.10.0"
},
"repository": {
"type": "git",
"url": "https://github.com/polygonplanet/encoding.js.git"
},
"author": "polygonplanet <polygon.planet.aqua@gmail.com>",
"license": "MIT",
"bugs": {
"url": "https://github.com/polygonplanet/encoding.js/issues"
},
"homepage": "https://github.com/polygonplanet/encoding.js",
"keywords": [
"base64",
"charset",
"convert",
"detect",
"encoding",
"euc-jp",
"eucjp",
"iconv",
"iso-2022-jp",
"japanese",
"jis",
"shift_jis",
"sjis",
"unicode",
"urldecode",
"urlencode",
"utf-16",
"utf-32",
"utf-8"
],
"dependencies": {},
"devDependencies": {
"bannerify": "^1.0.1",
"browserify": "^17.0.0",
"eslint": "^8.12.0",
"mocha": "^9.2.2",
"package-json-versionify": "^1.0.4",
"power-assert": "^1.6.1",
"uglify-js": "^3.15.3",
"uglifyify": "^5.0.2",
"watchify": "^4.0.0"
},
"browserify": {
"transform": [
"package-json-versionify"
]
}
}

View File

@@ -0,0 +1,6 @@
/*!
* <%= pkg.name %> v<%= pkg.version %> - <%= pkg.description %>
* Copyright (c) 2012 <%= pkg.author %>
* <%= pkg.homepage %>
* @license <%= pkg.license %>
*/

139
test/imaps/node_modules/encoding-japanese/src/config.js generated vendored Normal file
View File

@@ -0,0 +1,139 @@
// Module-wide configuration and feature detection for encoding.js.
var util = require('./util');
var EncodingTable = require('./encoding-table');
// Fallback character when a character can't be represented
exports.FALLBACK_CHARACTER = 63; // '?'
// Whether the runtime supports typed arrays (Uint8Array/Uint16Array).
var HAS_TYPED = exports.HAS_TYPED = typeof Uint8Array !== 'undefined' && typeof Uint16Array !== 'undefined';
// Test for String.fromCharCode.apply
// Probe whether fromCharCode can be applied with an array argument;
// some engines throw or misbehave, so the result is cached once here.
var CAN_CHARCODE_APPLY = false;
var CAN_CHARCODE_APPLY_TYPED = false;
try {
if (String.fromCharCode.apply(null, [0x61]) === 'a') {
CAN_CHARCODE_APPLY = true;
}
} catch (e) {}
// Repeat the probe with a typed array, which some engines reject
// even when a plain array works.
if (HAS_TYPED) {
try {
if (String.fromCharCode.apply(null, new Uint8Array([0x61])) === 'a') {
CAN_CHARCODE_APPLY_TYPED = true;
}
} catch (e) {}
}
exports.CAN_CHARCODE_APPLY = CAN_CHARCODE_APPLY;
exports.CAN_CHARCODE_APPLY_TYPED = CAN_CHARCODE_APPLY_TYPED;
// Function.prototype.apply stack max range
// NOTE(review): 65533 is presumably a conservative bound below common
// engine argument limits — confirm against the engines supported.
exports.APPLY_BUFFER_SIZE = 65533;
// Lazily set elsewhere once the buffer size has been verified at runtime.
exports.APPLY_BUFFER_SIZE_OK = null;
// Canonical encoding names recognized by encoding.js.
// - `order`: priority position used when building EncodingOrders below
//   (lower values are tried first during auto-detection).
// - `alias`: alternative names mapped to the canonical name.
// - A `null` entry has neither a detection order nor aliases.
// Key enumeration order matters: EncodingOrders iterates these keys.
var EncodingNames = exports.EncodingNames = {
UTF32: {
order: 0
},
UTF32BE: {
alias: ['UCS4']
},
UTF32LE: null,
UTF16: {
order: 1
},
UTF16BE: {
alias: ['UCS2']
},
UTF16LE: null,
BINARY: {
order: 2
},
ASCII: {
order: 3,
alias: ['ISO646', 'CP367']
},
JIS: {
order: 4,
alias: ['ISO2022JP']
},
UTF8: {
order: 5
},
EUCJP: {
order: 6
},
SJIS: {
order: 7,
alias: ['CP932', 'MSKANJI', 'WINDOWS31J']
},
UNICODE: {
order: 8
}
};
var EncodingAliases = {};
exports.EncodingAliases = EncodingAliases;
exports.EncodingOrders = (function() {
var aliases = EncodingAliases;
var names = util.objectKeys(EncodingNames);
var orders = [];
var name, encoding, j, l;
for (var i = 0, len = names.length; i < len; i++) {
name = names[i];
aliases[name] = name;
encoding = EncodingNames[name];
if (encoding != null) {
if (encoding.order != null) {
orders[orders.length] = name;
}
if (encoding.alias) {
// Create encoding aliases
for (j = 0, l = encoding.alias.length; j < l; j++) {
aliases[encoding.alias[j]] = name;
}
}
}
}
orders.sort(function(a, b) {
return EncodingNames[a].order - EncodingNames[b].order;
});
return orders;
}());
/**
 * Lazily build the JIS -> UTF-8 reverse lookup tables by inverting
 * the UTF8_TO_JIS / UTF8_TO_JISX0212 tables. Runs at most once.
 */
function init_JIS_TO_UTF8_TABLE() {
  if (EncodingTable.JIS_TO_UTF8_TABLE !== null) {
    return; // already initialized
  }

  // Invert UTF8_TO_JIS_TABLE (JIS X 0208).
  EncodingTable.JIS_TO_UTF8_TABLE = {};
  var keys = util.objectKeys(EncodingTable.UTF8_TO_JIS_TABLE);
  for (var i = 0; i < keys.length; i++) {
    var key = keys[i];
    var value = EncodingTable.UTF8_TO_JIS_TABLE[key];
    // Values <= 0x5F are skipped when inverting — presumably single-byte
    // mappings that must not shadow the double-byte table; confirm
    // against the table definitions.
    if (value > 0x5F) {
      EncodingTable.JIS_TO_UTF8_TABLE[value] = key | 0;
    }
  }

  // Invert UTF8_TO_JISX0212_TABLE (JIS X 0212) with no filtering.
  EncodingTable.JISX0212_TO_UTF8_TABLE = {};
  keys = util.objectKeys(EncodingTable.UTF8_TO_JISX0212_TABLE);
  for (i = 0; i < keys.length; i++) {
    key = keys[i];
    EncodingTable.JISX0212_TO_UTF8_TABLE[EncodingTable.UTF8_TO_JISX0212_TABLE[key]] = key | 0;
  }
}
exports.init_JIS_TO_UTF8_TABLE = init_JIS_TO_UTF8_TABLE;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,502 @@
/**
 * Binary (exe, images and so, etc.)
 *
 * Note:
 * This function is not considered for Unicode
 */
function isBINARY(data) {
  var length = data && data.length;
  for (var i = 0; i < length; i++) {
    var code = data[i];
    // A value above 0xFF cannot occur in a byte stream at all.
    if (code > 0xFF) {
      return false;
    }
    // Low control bytes (0x00-0x07) or 0xFF strongly suggest binary data.
    if (code === 0xFF || (code >= 0x00 && code <= 0x07)) {
      return true;
    }
  }
  return false;
}
exports.isBINARY = isBINARY;
/**
 * ASCII (ISO-646)
 */
function isASCII(data) {
  var length = data && data.length;
  for (var i = 0; i < length; i++) {
    var code = data[i];
    // Any value with the high bit set (or beyond byte range), or an
    // ESC byte (0x1B, used by ISO-2022-JP), disqualifies plain ASCII.
    if (code >= 0x80 || code === 0x1B) {
      return false;
    }
  }
  return true;
}
exports.isASCII = isASCII;
/**
 * ISO-2022-JP (JIS)
 *
 * RFC1468 Japanese Character Encoding for Internet Messages
 * RFC1554 ISO-2022-JP-2: Multilingual Extension of ISO-2022-JP
 * RFC2237 Japanese Character Encoding for Internet Messages
 */
function isJIS(data) {
  var length = data && data.length;
  for (var i = 0; i < length; i++) {
    var code = data[i];
    // ISO-2022-JP is a 7-bit encoding: any 8-bit value disqualifies it.
    if (code >= 0x80) {
      return false;
    }
    if (code !== 0x1B) {
      continue;
    }
    // An ESC must be followed by a complete two-byte designation.
    if (i + 2 >= length) {
      return false;
    }
    var first = data[i + 1];
    var second = data[i + 2];
    if (first === 0x24 &&
        (second === 0x28 || // JIS X 0208-1990/2000/2004
         second === 0x40 || // JIS X 0208-1978
         second === 0x42)) { // JIS X 0208-1983
      return true;
    }
    if (first === 0x26 && second === 0x40) { // JIS X 0208-1990
      return true;
    }
    if (first === 0x28 &&
        (second === 0x42 || // ASCII
         second === 0x49 || // JIS X 0201 Halfwidth Katakana
         second === 0x4A)) { // JIS X 0201-1976 Roman set
      return true;
    }
  }
  return false;
}
exports.isJIS = isJIS;
/**
 * EUC-JP
 */
function isEUCJP(data) {
  var length = data && data.length;
  for (var i = 0; i < length; i++) {
    var code = data[i];
    if (code < 0x80) {
      // ASCII range passes through unchanged.
      continue;
    }
    if (code > 0xFF || code < 0x8E) {
      return false;
    }
    if (code === 0x8E) {
      // SS2: halfwidth katakana, one trailing byte in 0xA1-0xDF.
      if (i + 1 >= length) {
        return false;
      }
      code = data[++i];
      if (code < 0xA1 || code > 0xDF) {
        return false;
      }
    } else if (code === 0x8F) {
      // SS3: JIS X 0212, two trailing bytes.
      if (i + 2 >= length) {
        return false;
      }
      code = data[++i];
      if (code < 0xA2 || code > 0xED) {
        return false;
      }
      code = data[++i];
      if (code < 0xA1 || code > 0xFE) {
        return false;
      }
    } else if (code >= 0xA1 && code <= 0xFE) {
      // JIS X 0208 lead byte, one trailing byte in 0xA1-0xFE.
      if (i + 1 >= length) {
        return false;
      }
      code = data[++i];
      if (code < 0xA1 || code > 0xFE) {
        return false;
      }
    } else {
      // 0x90-0xA0 and 0xFF are never valid lead bytes.
      return false;
    }
  }
  return true;
}
exports.isEUCJP = isEUCJP;
/**
 * Shift-JIS (SJIS)
 */
function isSJIS(data) {
  var length = data && data.length;
  var i = 0;
  // Skip a leading run of values above 0x80, rejecting anything that
  // cannot be a byte (> 0xFF).
  while (i < length && data[i] > 0x80) {
    if (data[i++] > 0xFF) {
      return false;
    }
  }
  while (i < length) {
    var code = data[i];
    if (code <= 0x80 || (code >= 0xA1 && code <= 0xDF)) {
      // Single byte: ASCII/control or halfwidth katakana.
      i++;
      continue;
    }
    // Lead byte of a double-byte character: 0xA0, anything above 0xEF,
    // or a truncated pair is invalid.
    if (code === 0xA0 || code > 0xEF || i + 1 >= length) {
      return false;
    }
    var trail = data[i + 1];
    if (trail < 0x40 || trail === 0x7F || trail > 0xFC) {
      return false;
    }
    i += 2;
  }
  return true;
}
exports.isSJIS = isSJIS;
/**
 * UTF-8
 */
// Walks the byte sequence validating each multi-byte pattern
// (lengths, lead-byte ranges, and continuation-byte ranges match the
// well-formed sequences of RFC 3629, excluding overlong encodings,
// surrogates, and code points above U+10FFFF).
// NOTE(review): single bytes are only accepted for TAB/LF/CR and
// printable ASCII (0x20-0x7E); other ASCII control bytes are rejected,
// which is stricter than UTF-8 itself — presumably a detection
// heuristic, not a validity check.
function isUTF8(data) {
var i = 0;
var len = data && data.length;
var b;
for (; i < len; i++) {
b = data[i];
// Not representable as a byte at all.
if (b > 0xFF) {
return false;
}
if (b === 0x09 || b === 0x0A || b === 0x0D ||
(b >= 0x20 && b <= 0x7E)) {
continue;
}
// 2-byte sequence: lead 0xC2-0xDF + one continuation byte.
if (b >= 0xC2 && b <= 0xDF) {
if (i + 1 >= len || data[i + 1] < 0x80 || data[i + 1] > 0xBF) {
return false;
}
i++;
} else if (b === 0xE0) {
// 3-byte, lead 0xE0: first continuation restricted to 0xA0-0xBF
// (rules out overlong encodings).
if (i + 2 >= len ||
data[i + 1] < 0xA0 || data[i + 1] > 0xBF ||
data[i + 2] < 0x80 || data[i + 2] > 0xBF) {
return false;
}
i += 2;
} else if ((b >= 0xE1 && b <= 0xEC) ||
b === 0xEE || b === 0xEF) {
// 3-byte, general lead bytes: two plain continuation bytes.
if (i + 2 >= len ||
data[i + 1] < 0x80 || data[i + 1] > 0xBF ||
data[i + 2] < 0x80 || data[i + 2] > 0xBF) {
return false;
}
i += 2;
} else if (b === 0xED) {
// 3-byte, lead 0xED: first continuation capped at 0x9F
// (excludes UTF-16 surrogate code points U+D800-U+DFFF).
if (i + 2 >= len ||
data[i + 1] < 0x80 || data[i + 1] > 0x9F ||
data[i + 2] < 0x80 || data[i + 2] > 0xBF) {
return false;
}
i += 2;
} else if (b === 0xF0) {
// 4-byte, lead 0xF0: first continuation starts at 0x90
// (rules out overlong encodings).
if (i + 3 >= len ||
data[i + 1] < 0x90 || data[i + 1] > 0xBF ||
data[i + 2] < 0x80 || data[i + 2] > 0xBF ||
data[i + 3] < 0x80 || data[i + 3] > 0xBF) {
return false;
}
i += 3;
} else if (b >= 0xF1 && b <= 0xF3) {
// 4-byte, general lead bytes: three plain continuation bytes.
if (i + 3 >= len ||
data[i + 1] < 0x80 || data[i + 1] > 0xBF ||
data[i + 2] < 0x80 || data[i + 2] > 0xBF ||
data[i + 3] < 0x80 || data[i + 3] > 0xBF) {
return false;
}
i += 3;
} else if (b === 0xF4) {
// 4-byte, lead 0xF4: first continuation capped at 0x8F
// (excludes code points above U+10FFFF).
if (i + 3 >= len ||
data[i + 1] < 0x80 || data[i + 1] > 0x8F ||
data[i + 2] < 0x80 || data[i + 2] > 0xBF ||
data[i + 3] < 0x80 || data[i + 3] > 0xBF) {
return false;
}
i += 3;
} else {
// Invalid lead byte (0x80-0xC1, 0xC0/0xC1 overlong leads, 0xF5+).
return false;
}
}
return true;
}
exports.isUTF8 = isUTF8;
/**
* UTF-16 (LE or BE)
*
* RFC2781: UTF-16, an encoding of ISO 10646
*
* @link http://www.ietf.org/rfc/rfc2781.txt
*/
/**
 * UTF-16 (LE or BE) detection.
 *
 * True on a BOM, or when a NUL byte is found adjacent to a low
 * ASCII byte (the high byte of an ASCII code unit in either order).
 * Inputs shorter than two bytes are never UTF-16.
 */
function isUTF16(data) {
  var size = data && data.length;
  if (size < 2) {
    // A single element can still reject values above 0xFF
    if (data[0] > 0xFF) {
      return false;
    }
  } else {
    var first = data[0];
    var second = data[1];
    if (first === 0xFF && second === 0xFE) {
      return true; // BOM (little-endian)
    }
    if (first === 0xFE && second === 0xFF) {
      return true; // BOM (big-endian)
    }
    // No BOM: search for the first NUL byte
    var zeroAt = null;
    for (var pos = 0; pos < size; pos++) {
      if (data[pos] === 0x00) {
        zeroAt = pos;
        break;
      }
      if (data[pos] > 0xFF) {
        return false;
      }
    }
    if (zeroAt === null) {
      return false; // Non ASCII
    }
    // Big-endian pattern: 0x00 followed by a low ASCII byte
    var after = data[zeroAt + 1];
    if (after !== void 0 && after > 0x00 && after < 0x80) {
      return true;
    }
    // Little-endian pattern: a low ASCII byte followed by 0x00
    var before = data[zeroAt - 1];
    if (before !== void 0 && before > 0x00 && before < 0x80) {
      return true;
    }
  }
  return false;
}
exports.isUTF16 = isUTF16;
/**
* UTF-16BE (big-endian)
*
* RFC 2781 4.3 Interpreting text labelled as UTF-16
* Text labelled "UTF-16BE" can always be interpreted as being big-endian
* when no BOM is found (SHOULD)
*
* @link http://www.ietf.org/rfc/rfc2781.txt
*/
/**
 * UTF-16BE (big-endian) detection.
 *
 * True on a big-endian BOM, or when the first NUL byte sits at an
 * even offset (the high byte of a big-endian ASCII code unit).
 */
function isUTF16BE(data) {
  var size = data && data.length;
  if (size < 2) {
    if (data[0] > 0xFF) {
      return false;
    }
  } else {
    if (data[0] === 0xFE && data[1] === 0xFF) {
      return true; // BOM
    }
    // Locate the first NUL byte, rejecting any value above 0xFF
    var zeroAt = null;
    for (var pos = 0; pos < size; pos++) {
      if (data[pos] === 0x00) {
        zeroAt = pos;
        break;
      }
      if (data[pos] > 0xFF) {
        return false;
      }
    }
    if (zeroAt === null) {
      return false; // Non ASCII
    }
    // Big-endian ASCII places the zero (high) byte on even offsets
    if (zeroAt % 2 === 0) {
      return true;
    }
  }
  return false;
}
exports.isUTF16BE = isUTF16BE;
/**
* UTF-16LE (little-endian)
*/
/**
 * UTF-16LE (little-endian) detection.
 *
 * True on a little-endian BOM, or when the first NUL byte sits at an
 * odd offset (the high byte of a little-endian ASCII code unit).
 */
function isUTF16LE(data) {
  var size = data && data.length;
  if (size < 2) {
    if (data[0] > 0xFF) {
      return false;
    }
  } else {
    if (data[0] === 0xFF && data[1] === 0xFE) {
      return true; // BOM
    }
    // Locate the first NUL byte, rejecting any value above 0xFF
    var zeroAt = null;
    for (var pos = 0; pos < size; pos++) {
      if (data[pos] === 0x00) {
        zeroAt = pos;
        break;
      }
      if (data[pos] > 0xFF) {
        return false;
      }
    }
    if (zeroAt === null) {
      return false; // Non ASCII
    }
    // Little-endian ASCII places the zero (high) byte on odd offsets
    if (zeroAt % 2 !== 0) {
      return true;
    }
  }
  return false;
}
exports.isUTF16LE = isUTF16LE;
/**
* UTF-32
*
* Unicode 3.2.0: Unicode Standard Annex #19
*
* @link http://www.iana.org/assignments/charset-reg/UTF-32
* @link http://www.unicode.org/reports/tr19/tr19-9.html
*/
/**
 * UTF-32 detection.
 *
 * True on a 4-byte BOM, or when a run of three NUL bytes is found
 * adjacent to a low ASCII byte (an ASCII code point in a 32-bit unit,
 * either byte order).
 */
function isUTF32(data) {
  var size = data && data.length;
  if (size < 4) {
    // Too short for UTF-32; still reject values above 0xFF
    for (var j = 0; j < size; j++) {
      if (data[j] > 0xFF) {
        return false;
      }
    }
  } else {
    if (data[0] === 0x00 && data[1] === 0x00 && // BOM (big-endian)
        data[2] === 0xFE && data[3] === 0xFF) {
      return true;
    }
    if (data[0] === 0xFF && data[1] === 0xFE && // BOM (little-endian)
        data[2] === 0x00 && data[3] === 0x00) {
      return true;
    }
    // Look for three consecutive NUL bytes (the zero bytes of an
    // ASCII code point stored in 32 bits)
    var zeroAt = null;
    for (var pos = 0; pos < size; pos++) {
      if (data[pos] === 0x00 && data[pos + 1] === 0x00 &&
          data[pos + 2] === 0x00) {
        zeroAt = pos;
        break;
      }
      if (data[pos] > 0xFF) {
        return false;
      }
    }
    if (zeroAt === null) {
      return false;
    }
    // The byte order should be the big-endian when BOM is not detected.
    var after = data[zeroAt + 3];
    if (after !== void 0 && after > 0x00 && after <= 0x7F) {
      // big-endian: 00 00 00 xx
      return data[zeroAt + 2] === 0x00 && data[zeroAt + 1] === 0x00;
    }
    var before = data[zeroAt - 1];
    if (before !== void 0 && before > 0x00 && before <= 0x7F) {
      // little-endian: xx 00 00 00
      return data[zeroAt + 1] === 0x00 && data[zeroAt + 2] === 0x00;
    }
  }
  return false;
}
exports.isUTF32 = isUTF32;
/**
* JavaScript Unicode array
*/
/**
 * JavaScript Unicode array check: every element must be a valid
 * Unicode code point (0 .. U+10FFFF).
 */
function isUNICODE(data) {
  var size = data && data.length;
  for (var pos = 0; pos < size; pos++) {
    var code = data[pos];
    if (code < 0 || code > 0x10FFFF) {
      return false;
    }
  }
  return true;
}
exports.isUNICODE = isUNICODE;

View File

@@ -0,0 +1,4 @@
exports.UTF8_TO_JIS_TABLE = require('./utf8-to-jis-table');
exports.UTF8_TO_JISX0212_TABLE = require('./utf8-to-jisx0212-table');
exports.JIS_TO_UTF8_TABLE = require('./jis-to-utf8-table');
exports.JISX0212_TO_UTF8_TABLE = require('./jisx0212-to-utf8-table');

593
test/imaps/node_modules/encoding-japanese/src/index.js generated vendored Normal file
View File

@@ -0,0 +1,593 @@
var config = require('./config');
var util = require('./util');
var EncodingDetect = require('./encoding-detect');
var EncodingConvert = require('./encoding-convert');
var KanaCaseTable = require('./kana-case-table');
var version = require('../package.json').version;
var hasOwnProperty = Object.prototype.hasOwnProperty;
var Encoding = {
version: version,
/**
* Encoding orders
*/
orders: config.EncodingOrders,
/**
* Detects character encoding
*
* If encodings is "AUTO", or the encoding-list as an array, or
* comma separated list string it will be detected automatically
*
* @param {Array.<number>|TypedArray|string} data The data being detected
* @param {(Object|string|Array.<string>)=} [encodings] The encoding-list of
* character encoding
* @return {string|boolean} The detected character encoding, or false
*/
detect: function(data, encodings) {
  // Nothing to detect for null/undefined or empty input
  if (data == null || data.length === 0) {
    return false;
  }
  // Allow an options object of the form { encoding: ... }
  if (util.isObject(encodings) && !util.isArray(encodings)) {
    encodings = encodings.encoding;
  }
  // Normalize string input into a code-unit buffer
  if (util.isString(data)) {
    data = util.stringToBuffer(data);
  }
  if (encodings == null) {
    // Default: try every supported encoding in priority order
    encodings = Encoding.orders;
  } else {
    if (util.isString(encodings)) {
      encodings = encodings.toUpperCase();
      if (encodings === 'AUTO') {
        encodings = Encoding.orders;
      } else if (~encodings.indexOf(',')) {
        // Comma separated list, e.g. 'SJIS, EUCJP'
        encodings = encodings.split(/\s*,\s*/);
      } else {
        // Single encoding name
        encodings = [encodings];
      }
    }
  }
  // Run each candidate's isXXX detector until one matches
  var len = encodings.length;
  var e, encoding, method;
  for (var i = 0; i < len; i++) {
    e = encodings[i];
    encoding = util.canonicalizeEncodingName(e);
    if (!encoding) {
      continue;
    }
    // Detector functions are named is<ENCODING> on EncodingDetect
    method = 'is' + encoding;
    if (!hasOwnProperty.call(EncodingDetect, method)) {
      throw new Error('Undefined encoding: ' + e);
    }
    if (EncodingDetect[method](data)) {
      return encoding;
    }
  }
  // No candidate matched
  return false;
},
/**
* Convert character encoding
*
* If `from` is "AUTO", or the encoding-list as an array, or
* comma separated list string it will be detected automatically
*
* @param {Array.<number>|TypedArray|string} data The data being converted
* @param {(string|Object)} to The name of encoding to
* @param {(string|Array.<string>)=} [from] The encoding-list of
* character encoding
* @return {Array|TypedArray|string} The converted data
*/
convert: function(data, to, from) {
  var result, type, options;
  // `to` may be an options object: { to, from, type, ... }
  if (!util.isObject(to)) {
    options = {};
  } else {
    options = to;
    from = options.from;
    to = options.to;
    if (options.type) {
      type = options.type;
    }
  }
  if (util.isString(data)) {
    // String input implies string output unless a type was requested
    type = type || 'string';
    data = util.stringToBuffer(data);
  } else if (data == null || data.length === 0) {
    data = [];
  }
  // Resolve the source encoding: explicit single name, or auto-detect
  var encodingFrom;
  if (from != null && util.isString(from) &&
      from.toUpperCase() !== 'AUTO' && !~from.indexOf(',')) {
    encodingFrom = util.canonicalizeEncodingName(from);
  } else {
    encodingFrom = Encoding.detect(data);
  }
  var encodingTo = util.canonicalizeEncodingName(to);
  // Converter functions are named <FROM>To<TO> on EncodingConvert
  var method = encodingFrom + 'To' + encodingTo;
  if (hasOwnProperty.call(EncodingConvert, method)) {
    result = EncodingConvert[method](data, options);
  } else {
    // Returns the raw data if the method is undefined
    result = data;
  }
  // Shape the result according to the requested output type
  switch (('' + type).toLowerCase()) {
    case 'string':
      return util.codeToString_fast(result);
    case 'arraybuffer':
      return util.codeToBuffer(result);
    default: // array
      return util.bufferToCode(result);
  }
},
/**
* Encode a character code array to URL string like encodeURIComponent
*
* @param {Array.<number>|TypedArray} data The data being encoded
* @return {string} The percent encoded string
*/
urlEncode: function(data) {
  if (util.isString(data)) {
    data = util.stringToBuffer(data);
  }
  // Hex digit character codes used to build '%XX' escapes
  var alpha = util.stringToCode('0123456789ABCDEF');
  var results = [];
  var i = 0;
  var len = data && data.length;
  var b;
  for (; i < len; i++) {
    b = data[i];
    // urlEncode is for an array of numbers in the range 0-255 (Uint8Array), but if an array
    // of numbers greater than 255 is passed (Unicode code unit i.e. charCodeAt range),
    // it will be tentatively encoded as UTF-8 using encodeURIComponent.
    if (b > 0xFF) {
      return encodeURIComponent(util.codeToString_fast(data));
    }
    // Unreserved characters pass through unescaped (same whitelist
    // as encodeURIComponent)
    if ((b >= 0x61 /*a*/ && b <= 0x7A /*z*/) ||
        (b >= 0x41 /*A*/ && b <= 0x5A /*Z*/) ||
        (b >= 0x30 /*0*/ && b <= 0x39 /*9*/) ||
        b === 0x21 /*!*/ ||
        (b >= 0x27 /*'*/ && b <= 0x2A /***/) ||
        b === 0x2D /*-*/ || b === 0x2E /*.*/ ||
        b === 0x5F /*_*/ || b === 0x7E /*~*/
    ) {
      results[results.length] = b;
    } else {
      // Everything else becomes a two-digit '%XX' escape
      results[results.length] = 0x25; /*%*/
      if (b < 0x10) {
        results[results.length] = 0x30; /*0*/
        results[results.length] = alpha[b];
      } else {
        results[results.length] = alpha[b >> 4 & 0xF];
        results[results.length] = alpha[b & 0xF];
      }
    }
  }
  return util.codeToString_fast(results);
},
/**
* Decode a percent encoded string to
* character code array like decodeURIComponent
*
* @param {string} string The data being decoded
* @return {Array.<number>} The decoded array
*/
urlDecode: function(string) {
var results = [];
var i = 0;
var len = string && string.length;
var c;
while (i < len) {
c = string.charCodeAt(i++);
if (c === 0x25 /*%*/) {
results[results.length] = parseInt(
string.charAt(i++) + string.charAt(i++), 16);
} else {
results[results.length] = c;
}
}
return results;
},
/**
* Encode a character code array to Base64 encoded string
*
* @param {Array.<number>|TypedArray} data The data being encoded
* @return {string} The Base64 encoded string
*/
base64Encode: function(data) {
if (util.isString(data)) {
data = util.stringToBuffer(data);
}
return util.base64encode(data);
},
/**
* Decode a Base64 encoded string to character code array
*
* @param {string} string The data being decoded
* @return {Array.<number>} The decoded array
*/
base64Decode: function(string) {
return util.base64decode(string);
},
/**
* Joins a character code array to string
*
* @param {Array.<number>|TypedArray} data The data being joined
* @return {String} The joined string
*/
codeToString: util.codeToString_fast,
/**
* Splits string to an array of character codes
*
* @param {string} string The input string
* @return {Array.<number>} The character code array
*/
stringToCode: util.stringToCode,
/**
* 全角英数記号文字を半角英数記号文字に変換
*
* Convert the ascii symbols and alphanumeric characters to
* the zenkaku symbols and alphanumeric characters
*
* @example
* console.log(Encoding.toHankakuCase('Ｈｅｌｌｏ Ｗｏｒｌｄ！ １２３４５'));
* // 'Hello World! 12345'
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toHankakuCase: function(data) {
var asString = false;
if (util.isString(data)) {
asString = true;
data = util.stringToBuffer(data);
}
var results = [];
var len = data && data.length;
var i = 0;
var c;
while (i < len) {
c = data[i++];
if (c >= 0xFF01 && c <= 0xFF5E) {
c -= 0xFEE0;
}
results[results.length] = c;
}
return asString ? util.codeToString_fast(results) : results;
},
/**
* 半角英数記号文字を全角英数記号文字に変換
*
* Convert to the zenkaku symbols and alphanumeric characters
* from the ascii symbols and alphanumeric characters
*
* @example
* console.log(Encoding.toZenkakuCase('Hello World! 12345'));
* // 'Ｈｅｌｌｏ Ｗｏｒｌｄ！ １２３４５'
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toZenkakuCase: function(data) {
var asString = false;
if (util.isString(data)) {
asString = true;
data = util.stringToBuffer(data);
}
var results = [];
var len = data && data.length;
var i = 0;
var c;
while (i < len) {
c = data[i++];
if (c >= 0x21 && c <= 0x7E) {
c += 0xFEE0;
}
results[results.length] = c;
}
return asString ? util.codeToString_fast(results) : results;
},
/**
* 全角カタカナを全角ひらがなに変換
*
* Convert to the zenkaku hiragana from the zenkaku katakana
*
* @example
* console.log(Encoding.toHiraganaCase('ボポヴァアィイゥウェエォオ'));
* // 'ぼぽう゛ぁあぃいぅうぇえぉお'
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toHiraganaCase: function(data) {
  // Remember whether to return a string or a code array
  var asString = false;
  if (util.isString(data)) {
    asString = true;
    data = util.stringToBuffer(data);
  }
  var results = [];
  var len = data && data.length;
  var i = 0;
  var c;
  while (i < len) {
    c = data[i++];
    // Katakana (U+30A1..U+30F6) -> hiragana is a fixed offset of 0x60
    if (c >= 0x30A1 && c <= 0x30F6) {
      c -= 0x0060;
    // 'ヷ' (U+30F7) => 'わ' + voiced sound mark
    } else if (c === 0x30F7) {
      results[results.length] = 0x308F;
      c = 0x309B;
    // 'ヺ' (U+30FA) => 'を' + voiced sound mark
    } else if (c === 0x30FA) {
      results[results.length] = 0x3092;
      c = 0x309B;
    }
    results[results.length] = c;
  }
  return asString ? util.codeToString_fast(results) : results;
},
/**
* 全角ひらがなを全角カタカナに変換
*
* Convert to the zenkaku katakana from the zenkaku hiragana
*
* @example
* console.log(Encoding.toKatakanaCase('ぼぽう゛ぁあぃいぅうぇえぉお'));
* // 'ボポヴァアィイゥウェエォオ'
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toKatakanaCase: function(data) {
  // Remember whether to return a string or a code array
  var asString = false;
  if (util.isString(data)) {
    asString = true;
    data = util.stringToBuffer(data);
  }
  var results = [];
  var len = data && data.length;
  var i = 0;
  var c;
  while (i < len) {
    c = data[i++];
    // Hiragana (U+3041..U+3096) -> katakana is a fixed offset of 0x60
    if (c >= 0x3041 && c <= 0x3096) {
      if ((c === 0x308F || // 'わ' + voiced mark => 'ヷ'
           c === 0x3092) && // 'を' + voiced mark => 'ヺ'
          i < len && data[i] === 0x309B) {
        // Combine the pair into a single code point
        c = c === 0x308F ? 0x30F7 : 0x30FA;
        i++;
      } else {
        c += 0x0060;
      }
    }
    results[results.length] = c;
  }
  return asString ? util.codeToString_fast(results) : results;
},
/**
* 全角カタカナを半角カタカナに変換
*
* Convert to the hankaku katakana from the zenkaku katakana
*
* @example
* console.log(Encoding.toHankanaCase('ボポヴァアィイゥウェエォオ'));
* // 'ボポヴァアィイゥウェエォオ'
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toHankanaCase: function(data) {
  // Remember whether to return a string or a code array
  var asString = false;
  if (util.isString(data)) {
    asString = true;
    data = util.stringToBuffer(data);
  }
  var results = [];
  var len = data && data.length;
  var i = 0;
  var c, d, t;
  while (i < len) {
    c = data[i++];
    if (c >= 0x3001 && c <= 0x30FC) {
      // Direct one-to-one mapping when the table has an entry
      t = KanaCaseTable.HANKANA_TABLE[c];
      if (t !== void 0) {
        results[results.length] = t;
        continue;
      }
    }
    // 'ヴ', 'ヷ', 'ヺ': emit base kana + halfwidth voiced sound mark
    if (c === 0x30F4 || c === 0x30F7 || c === 0x30FA) {
      results[results.length] = KanaCaseTable.HANKANA_SONANTS[c];
      results[results.length] = 0xFF9E;
    // 'カ'..'ド': each voiced kana directly follows its base form
    } else if (c >= 0x30AB && c <= 0x30C9) {
      results[results.length] = KanaCaseTable.HANKANA_TABLE[c - 1];
      results[results.length] = 0xFF9E;
    // 'ハ'..'ポ': base/voiced/semi-voiced repeat in groups of three
    } else if (c >= 0x30CF && c <= 0x30DD) {
      d = c % 3;
      results[results.length] = KanaCaseTable.HANKANA_TABLE[c - d];
      results[results.length] = KanaCaseTable.HANKANA_MARKS[d - 1];
    } else {
      // Anything else passes through unchanged
      results[results.length] = c;
    }
  }
  return asString ? util.codeToString_fast(results) : results;
},
/**
* 半角カタカナを全角カタカナに変換 (濁音含む)
*
* Convert to the zenkaku katakana from the hankaku katakana
*
* @example
* console.log(Encoding.toZenkanaCase('ボポヴァアィイゥウェエォオ'));
* // 'ボポヴァアィイゥウェエォオ'
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toZenkanaCase: function(data) {
  // Remember whether to return a string or a code array
  var asString = false;
  if (util.isString(data)) {
    asString = true;
    data = util.stringToBuffer(data);
  }
  var results = [];
  var len = data && data.length;
  var i = 0;
  var c, code, next;
  for (i = 0; i < len; i++) {
    c = data[i];
    // Hankaku katakana
    if (c > 0xFF60 && c < 0xFFA0) {
      code = KanaCaseTable.ZENKANA_TABLE[c - 0xFF61];
      if (i + 1 < len) {
        // Merge a following (semi-)voiced sound mark into one char
        next = data[i + 1];
        // voiced mark + 'ウ' => 'ヴ'
        if (next === 0xFF9E && c === 0xFF73) {
          code = 0x30F4;
          i++;
        // voiced mark + 'ワ' => 'ヷ'
        } else if (next === 0xFF9E && c === 0xFF9C) {
          code = 0x30F7;
          i++;
        // voiced mark + 'ヲ' => 'ヺ'
        } else if (next === 0xFF9E && c === 0xFF66) {
          code = 0x30FA;
          i++;
        // voiced mark + 'カ'..'コ' or 'ハ'..'ホ': next zenkaku code
        } else if (next === 0xFF9E &&
                   ((c > 0xFF75 && c < 0xFF85) ||
                    (c > 0xFF89 && c < 0xFF8F))) {
          code++;
          i++;
        // semi-voiced mark + 'ハ'..'ホ': skip two zenkaku codes
        } else if (next === 0xFF9F &&
                   (c > 0xFF89 && c < 0xFF8F)) {
          code += 2;
          i++;
        }
      }
      c = code;
    }
    results[results.length] = c;
  }
  return asString ? util.codeToString_fast(results) : results;
},
/**
* 全角スペースを半角スペースに変換
*
* Convert the em space(U+3000) to the single space(U+0020)
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toHankakuSpace: function(data) {
if (util.isString(data)) {
return data.replace(/\u3000/g, ' ');
}
var results = [];
var len = data && data.length;
var i = 0;
var c;
while (i < len) {
c = data[i++];
if (c === 0x3000) {
c = 0x20;
}
results[results.length] = c;
}
return results;
},
/**
* 半角スペースを全角スペースに変換
*
* Convert the single space(U+0020) to the em space(U+3000)
*
* @param {Array.<number>|TypedArray|string} data The input unicode data
* @return {Array.<number>|string} The converted data
*/
toZenkakuSpace: function(data) {
if (util.isString(data)) {
return data.replace(/\u0020/g, '\u3000');
}
var results = [];
var len = data && data.length;
var i = 0;
var c;
while (i < len) {
c = data[i++];
if (c === 0x20) {
c = 0x3000;
}
results[results.length] = c;
}
return results;
}
};
module.exports = Encoding;

View File

@@ -0,0 +1,5 @@
/**
* Encoding conversion table for JIS to UTF-8
*/
var JIS_TO_UTF8_TABLE = null;
module.exports = JIS_TO_UTF8_TABLE;

View File

@@ -0,0 +1,5 @@
/**
* Encoding conversion table for JIS X 0212:1990 (Hojo-Kanji) to UTF-8
*/
var JISX0212_TO_UTF8_TABLE = null;
module.exports = JISX0212_TO_UTF8_TABLE;

View File

@@ -0,0 +1,41 @@
/* eslint-disable key-spacing */
/**
* Katakana table
*/
exports.HANKANA_TABLE = {
0x3001:0xFF64,0x3002:0xFF61,0x300C:0xFF62,0x300D:0xFF63,0x309B:0xFF9E,
0x309C:0xFF9F,0x30A1:0xFF67,0x30A2:0xFF71,0x30A3:0xFF68,0x30A4:0xFF72,
0x30A5:0xFF69,0x30A6:0xFF73,0x30A7:0xFF6A,0x30A8:0xFF74,0x30A9:0xFF6B,
0x30AA:0xFF75,0x30AB:0xFF76,0x30AD:0xFF77,0x30AF:0xFF78,0x30B1:0xFF79,
0x30B3:0xFF7A,0x30B5:0xFF7B,0x30B7:0xFF7C,0x30B9:0xFF7D,0x30BB:0xFF7E,
0x30BD:0xFF7F,0x30BF:0xFF80,0x30C1:0xFF81,0x30C3:0xFF6F,0x30C4:0xFF82,
0x30C6:0xFF83,0x30C8:0xFF84,0x30CA:0xFF85,0x30CB:0xFF86,0x30CC:0xFF87,
0x30CD:0xFF88,0x30CE:0xFF89,0x30CF:0xFF8A,0x30D2:0xFF8B,0x30D5:0xFF8C,
0x30D8:0xFF8D,0x30DB:0xFF8E,0x30DE:0xFF8F,0x30DF:0xFF90,0x30E0:0xFF91,
0x30E1:0xFF92,0x30E2:0xFF93,0x30E3:0xFF6C,0x30E4:0xFF94,0x30E5:0xFF6D,
0x30E6:0xFF95,0x30E7:0xFF6E,0x30E8:0xFF96,0x30E9:0xFF97,0x30EA:0xFF98,
0x30EB:0xFF99,0x30EC:0xFF9A,0x30ED:0xFF9B,0x30EF:0xFF9C,0x30F2:0xFF66,
0x30F3:0xFF9D,0x30FB:0xFF65,0x30FC:0xFF70
};
exports.HANKANA_SONANTS = {
0x30F4:0xFF73,
0x30F7:0xFF9C,
0x30FA:0xFF66
};
exports.HANKANA_MARKS = [0xFF9E, 0xFF9F];
/**
* Zenkaku table [U+FF61] - [U+FF9F]
*/
exports.ZENKANA_TABLE = [
0x3002, 0x300C, 0x300D, 0x3001, 0x30FB, 0x30F2, 0x30A1, 0x30A3,
0x30A5, 0x30A7, 0x30A9, 0x30E3, 0x30E5, 0x30E7, 0x30C3, 0x30FC,
0x30A2, 0x30A4, 0x30A6, 0x30A8, 0x30AA, 0x30AB, 0x30AD, 0x30AF,
0x30B1, 0x30B3, 0x30B5, 0x30B7, 0x30B9, 0x30BB, 0x30BD, 0x30BF,
0x30C1, 0x30C4, 0x30C6, 0x30C8, 0x30CA, 0x30CB, 0x30CC, 0x30CD,
0x30CE, 0x30CF, 0x30D2, 0x30D5, 0x30D8, 0x30DB, 0x30DE, 0x30DF,
0x30E0, 0x30E1, 0x30E2, 0x30E4, 0x30E6, 0x30E8, 0x30E9, 0x30EA,
0x30EB, 0x30EC, 0x30ED, 0x30EF, 0x30F3, 0x309B, 0x309C
];

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

361
test/imaps/node_modules/encoding-japanese/src/util.js generated vendored Normal file
View File

@@ -0,0 +1,361 @@
var config = require('./config');
var fromCharCode = String.fromCharCode;
var slice = Array.prototype.slice;
var toString = Object.prototype.toString;
var hasOwnProperty = Object.prototype.hasOwnProperty;
var nativeIsArray = Array.isArray;
var nativeObjectKeys = Object.keys;
// True for objects and functions; `typeof null === 'object'` is
// excluded by the truthiness check.
function isObject(x) {
  var t = typeof x;
  return t === 'function' || (t === 'object' && !!x);
}
exports.isObject = isObject;
// Prefer the native Array.isArray; fall back to a toString tag check
// on older engines.
function isArray(x) {
  if (nativeIsArray) {
    return nativeIsArray(x);
  }
  return toString.call(x) === '[object Array]';
}
exports.isArray = isArray;
// True for both string primitives and String wrapper objects.
function isString(x) {
  if (typeof x === 'string') {
    return true;
  }
  return toString.call(x) === '[object String]';
}
exports.isString = isString;
// Own enumerable key names; uses the native Object.keys when available.
function objectKeys(object) {
  if (nativeObjectKeys) {
    return nativeObjectKeys(object);
  }
  var keys = [];
  for (var name in object) {
    // Skip inherited properties
    if (hasOwnProperty.call(object, name)) {
      keys.push(name);
    }
  }
  return keys;
}
exports.objectKeys = objectKeys;
/**
 * Allocates a buffer of `size` elements, `bits` wide (8 or 16).
 * Uses Uint8Array/Uint16Array when typed arrays are supported,
 * otherwise falls back to a plain Array.
 */
function createBuffer(bits, size) {
  if (config.HAS_TYPED) {
    switch (bits) {
      case 8: return new Uint8Array(size);
      case 16: return new Uint16Array(size);
    }
  }
  // No typed-array support (or an unsupported bit width): plain array
  return new Array(size);
}
exports.createBuffer = createBuffer;
// Splits a string into a buffer of UTF-16 code units; 16 bits per
// element is enough for any charCodeAt result.
function stringToBuffer(string) {
  var size = string.length;
  var buffer = createBuffer(16, size);
  for (var pos = 0; pos < size; pos++) {
    buffer[pos] = string.charCodeAt(pos);
  }
  return buffer;
}
exports.stringToBuffer = stringToBuffer;
/**
 * Converts a character-code array to a string as fast as possible.
 *
 * Tries a single String.fromCharCode.apply call first; if the engine
 * rejects the argument list as too large, falls back to chunked
 * conversion. The probe result is cached in config.APPLY_BUFFER_SIZE_OK.
 */
function codeToString_fast(code) {
  if (config.CAN_CHARCODE_APPLY && config.CAN_CHARCODE_APPLY_TYPED) {
    var len = code && code.length;
    // Known-safe size: one apply() call handles everything
    if (len < config.APPLY_BUFFER_SIZE && config.APPLY_BUFFER_SIZE_OK) {
      return fromCharCode.apply(null, code);
    }
    if (config.APPLY_BUFFER_SIZE_OK === null) {
      // Unknown apply() limit: probe once and remember the outcome
      try {
        var s = fromCharCode.apply(null, code);
        if (len > config.APPLY_BUFFER_SIZE) {
          config.APPLY_BUFFER_SIZE_OK = true;
        }
        return s;
      } catch (e) {
        // Ignore the RangeError "arguments too large"
        config.APPLY_BUFFER_SIZE_OK = false;
      }
    }
  }
  return codeToString_chunked(code);
}
exports.codeToString_fast = codeToString_fast;
/**
 * Converts a character-code array to a string in chunks of
 * APPLY_BUFFER_SIZE to stay under the engine's apply() argument limit.
 * Falls back to one-character-at-a-time conversion if apply() fails.
 */
function codeToString_chunked(code) {
  var string = '';
  var length = code && code.length;
  var i = 0;
  var sub;
  while (i < length) {
    // Take the next chunk (subarray avoids copying for typed arrays)
    if (code.subarray) {
      sub = code.subarray(i, i + config.APPLY_BUFFER_SIZE);
    } else {
      sub = code.slice(i, i + config.APPLY_BUFFER_SIZE);
    }
    i += config.APPLY_BUFFER_SIZE;
    if (config.APPLY_BUFFER_SIZE_OK) {
      string += fromCharCode.apply(null, sub);
      continue;
    }
    if (config.APPLY_BUFFER_SIZE_OK === null) {
      // Probe whether apply() tolerates this chunk size
      try {
        string += fromCharCode.apply(null, sub);
        if (sub.length > config.APPLY_BUFFER_SIZE) {
          config.APPLY_BUFFER_SIZE_OK = true;
        }
        continue;
      } catch (e) {
        config.APPLY_BUFFER_SIZE_OK = false;
      }
    }
    // apply() not usable: fall back to the slow conversion
    return codeToString_slow(code);
  }
  return string;
}
exports.codeToString_chunked = codeToString_chunked;
// Character-by-character fallback conversion; safe for any length.
function codeToString_slow(code) {
  var result = '';
  var size = code && code.length;
  for (var pos = 0; pos < size; pos++) {
    result += fromCharCode(code[pos]);
  }
  return result;
}
exports.codeToString_slow = codeToString_slow;
// Splits a string into a plain array of its UTF-16 code units.
function stringToCode(string) {
  var size = string && string.length;
  var codes = [];
  for (var pos = 0; pos < size; pos++) {
    codes.push(string.charCodeAt(pos));
  }
  return codes;
}
exports.stringToCode = stringToCode;
/**
 * Converts a character-code array into a buffer suitable for output.
 * With typed-array support the result is a Uint16Array; otherwise a
 * plain array (the input itself when it already is one).
 */
function codeToBuffer(code) {
  if (config.HAS_TYPED) {
    // Unicode code point (charCodeAt range) values have a range of 0-0xFFFF, so use Uint16Array
    return new Uint16Array(code);
  }
  if (isArray(code)) {
    // Already a plain array: return it as-is (no copy)
    return code;
  }
  // Copy any other array-like into a plain array
  var length = code && code.length;
  var buffer = [];
  for (var i = 0; i < length; i++) {
    buffer[i] = code[i];
  }
  return buffer;
}
exports.codeToBuffer = codeToBuffer;
// Plain arrays pass through untouched; typed arrays and other
// array-likes are copied into a plain array.
function bufferToCode(buffer) {
  return isArray(buffer) ? buffer : slice.call(buffer);
}
exports.bufferToCode = bufferToCode;
/**
* Canonicalize the passed encoding name to the internal encoding name
*/
/**
 * Canonicalize the passed encoding name to the internal encoding name
 */
function canonicalizeEncodingName(target) {
  var name = '';
  // Uppercase and strip all non-alphanumerics: 'utf-8' -> 'UTF8'
  var expect = ('' + target).toUpperCase().replace(/[^A-Z0-9]+/g, '');
  var aliasNames = objectKeys(config.EncodingAliases);
  var len = aliasNames.length;
  var hit = 0;
  var encoding, encodingLen, j;
  for (var i = 0; i < len; i++) {
    encoding = aliasNames[i];
    if (encoding === expect) {
      // Exact alias match wins immediately
      name = encoding;
      break;
    }
    // Fuzzy match: keep the alias sharing the longest prefix or suffix
    // with the requested name seen so far.
    // NOTE(review): at j === 0 both slices are empty strings, so the
    // comparison succeeds for every alias and `name` is overwritten
    // even with no real overlap - verify against upstream behavior.
    encodingLen = encoding.length;
    for (j = hit; j < encodingLen; j++) {
      if (encoding.slice(0, j) === expect.slice(0, j) ||
          encoding.slice(-j) === expect.slice(-j)) {
        name = encoding;
        hit = j;
      }
    }
  }
  if (hasOwnProperty.call(config.EncodingAliases, name)) {
    // Map the matched alias to its canonical internal name
    return config.EncodingAliases[name];
  }
  return name;
}
exports.canonicalizeEncodingName = canonicalizeEncodingName;
// Base64
/* Copyright (C) 1999 Masanao Izumo <iz@onicos.co.jp>
* Version: 1.0
* LastModified: Dec 25 1999
* This library is free. You can redistribute it and/or modify it.
*/
// -- Masanao Izumo Copyright 1999 "free"
// Added binary array support for the Encoding.js
var base64EncodeChars = [
65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47
];
var base64DecodeChars = [
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
-1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1
];
var base64EncodePadding = '='.charCodeAt(0);
/**
 * Encodes a byte array into a Base64 string.
 *
 * Input bytes are consumed in groups of three and emitted as four
 * 6-bit characters; '=' padding is appended when one or two bytes
 * remain at the end.
 */
function base64encode(data) {
  var out, i, len;
  var c1, c2, c3;
  len = data && data.length;
  i = 0;
  out = [];
  while (i < len) {
    c1 = data[i++];
    if (i == len) {
      // One byte left: two chars + two pads
      out[out.length] = base64EncodeChars[c1 >> 2];
      out[out.length] = base64EncodeChars[(c1 & 0x3) << 4];
      out[out.length] = base64EncodePadding;
      out[out.length] = base64EncodePadding;
      break;
    }
    c2 = data[i++];
    if (i == len) {
      // Two bytes left: three chars + one pad
      out[out.length] = base64EncodeChars[c1 >> 2];
      out[out.length] = base64EncodeChars[((c1 & 0x3) << 4) | ((c2 & 0xF0) >> 4)];
      out[out.length] = base64EncodeChars[(c2 & 0xF) << 2];
      out[out.length] = base64EncodePadding;
      break;
    }
    c3 = data[i++];
    // Full 3-byte group -> 4 output characters
    out[out.length] = base64EncodeChars[c1 >> 2];
    out[out.length] = base64EncodeChars[((c1 & 0x3) << 4) | ((c2 & 0xF0) >> 4)];
    out[out.length] = base64EncodeChars[((c2 & 0xF) << 2) | ((c3 & 0xC0) >> 6)];
    out[out.length] = base64EncodeChars[c3 & 0x3F];
  }
  return codeToString_fast(out);
}
exports.base64encode = base64encode;
/**
 * Decodes a Base64 string into an array of byte values.
 *
 * Characters outside the Base64 alphabet are skipped; '=' (code 61)
 * terminates decoding and returns the bytes gathered so far.
 */
function base64decode(str) {
  var c1, c2, c3, c4;
  var i, len, out;
  len = str && str.length;
  i = 0;
  out = [];
  while (i < len) {
    /* c1 */
    do {
      // Skip characters that are not in the Base64 alphabet
      c1 = base64DecodeChars[str.charCodeAt(i++) & 0xFF];
    } while (i < len && c1 == -1);
    if (c1 == -1) {
      break;
    }
    /* c2 */
    do {
      c2 = base64DecodeChars[str.charCodeAt(i++) & 0xFF];
    } while (i < len && c2 == -1);
    if (c2 == -1) {
      break;
    }
    // First output byte: 6 bits of c1 + top 2 bits of c2
    out[out.length] = (c1 << 2) | ((c2 & 0x30) >> 4);
    /* c3 */
    do {
      c3 = str.charCodeAt(i++) & 0xFF;
      if (c3 == 61) { // '=' padding: no more data bytes follow
        return out;
      }
      c3 = base64DecodeChars[c3];
    } while (i < len && c3 == -1);
    if (c3 == -1) {
      break;
    }
    // Second output byte: low 4 bits of c2 + top 4 bits of c3
    out[out.length] = ((c2 & 0xF) << 4) | ((c3 & 0x3C) >> 2);
    /* c4 */
    do {
      c4 = str.charCodeAt(i++) & 0xFF;
      if (c4 == 61) { // '=' padding: no more data bytes follow
        return out;
      }
      c4 = base64DecodeChars[c4];
    } while (i < len && c4 == -1);
    if (c4 == -1) {
      break;
    }
    // Third output byte: low 2 bits of c3 + 6 bits of c4
    out[out.length] = ((c3 & 0x03) << 6) | c4;
  }
  return out;
}
exports.base64decode = base64decode;