Merge remote-tracking branch 'origin/master' into master

pull/737/head
飞鸟队员 4 years ago
commit 2844989f0e
  1. app/src/main/java/io/legado/app/base/adapter/CommonRecyclerAdapter.kt (1 line changed)
  2. app/src/main/java/io/legado/app/utils/EncodingDetect.java (4492 lines changed)
  3. app/src/main/java/io/legado/app/utils/EncodingDetect.kt (82 lines changed)
  4. app/src/main/java/io/legado/app/utils/icu4j/CharsetDetector.java (568 lines changed)
  5. app/src/main/java/io/legado/app/utils/icu4j/CharsetMatch.java (239 lines changed)
  6. app/src/main/java/io/legado/app/utils/icu4j/CharsetRecog_2022.java (164 lines changed)
  7. app/src/main/java/io/legado/app/utils/icu4j/CharsetRecog_UTF8.java (97 lines changed)
  8. app/src/main/java/io/legado/app/utils/icu4j/CharsetRecog_Unicode.java (186 lines changed)
  9. app/src/main/java/io/legado/app/utils/icu4j/CharsetRecog_mbcs.java (540 lines changed)
  10. app/src/main/java/io/legado/app/utils/icu4j/CharsetRecog_sbcs.java (1144 lines changed)
  11. app/src/main/java/io/legado/app/utils/icu4j/CharsetRecognizer.java (53 lines changed)

@@ -43,7 +43,6 @@ abstract class CommonRecyclerAdapter<ITEM, VB : ViewBinding>(protected val conte
private var itemClickListener: ((holder: ItemViewHolder, item: ITEM) -> Unit)? = null
private var itemLongClickListener: ((holder: ItemViewHolder, item: ITEM) -> Boolean)? = null
// A Kotlin setter is enough here; there is no need to write a separate setter function
var itemAnimation: ItemAnimation? = null
fun setOnItemClickListener(listener: (holder: ItemViewHolder, item: ITEM) -> Unit) {

File diff suppressed because it is too large

@@ -0,0 +1,82 @@
package io.legado.app.utils
import android.text.TextUtils
import io.legado.app.utils.icu4j.CharsetDetector
import org.jsoup.Jsoup
import java.io.File
import java.io.FileInputStream
import java.nio.charset.StandardCharsets
import java.util.*
/**
* Automatically detect the encoding of a file.
*/
@Suppress("MemberVisibilityCanBePrivate", "unused")
object EncodingDetect {
fun getHtmlEncode(bytes: ByteArray): String? {
try {
val doc = Jsoup.parse(String(bytes, StandardCharsets.UTF_8))
val metaTags = doc.getElementsByTag("meta")
var charsetStr: String
for (metaTag in metaTags) {
charsetStr = metaTag.attr("charset")
if (!TextUtils.isEmpty(charsetStr)) {
return charsetStr
}
val content = metaTag.attr("content")
val httpEquiv = metaTag.attr("http-equiv")
if (httpEquiv.toLowerCase(Locale.getDefault()) == "content-type") {
charsetStr = if (content.toLowerCase(Locale.getDefault()).contains("charset")) {
content.substring(
content.toLowerCase(Locale.getDefault())
.indexOf("charset") + "charset=".length
)
} else {
content.substring(content.toLowerCase(Locale.getDefault()).indexOf(";") + 1)
}
if (!TextUtils.isEmpty(charsetStr)) {
return charsetStr
}
}
}
} catch (ignored: Exception) {
}
return getEncode(bytes)
}
fun getEncode(bytes: ByteArray): String {
val detector = CharsetDetector()
detector.setText(bytes)
val match = detector.detect()
return match.name
}
/**
* Get the encoding of the file at the given path.
*/
fun getEncode(filePath: String): String {
return getEncode(File(filePath))
}
/**
* Get the encoding of a file.
*/
fun getEncode(file: File): String {
val tempByte = getFileBytes(file)
return getEncode(tempByte)
}
private fun getFileBytes(testFile: File?): ByteArray {
// Only the first 2000 bytes are sampled; that is enough for charset detection.
val byteArray = ByteArray(2000)
try {
FileInputStream(testFile).use { fis -> fis.read(byteArray) }
} catch (e: Exception) {
System.err.println("Error: $e")
}
return byteArray
}
}
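
A minimal usage sketch (not part of this diff) showing how the new Kotlin EncodingDetect helper might be called from app code; the file path and function name are illustrative.

import io.legado.app.utils.EncodingDetect
import java.io.File
import java.nio.charset.Charset

// Hypothetical caller: detect the charset of a cached text file, then decode it.
fun readTextWithDetectedCharset(path: String): String {
    val file = File(path)
    val charsetName = EncodingDetect.getEncode(file)   // samples the first bytes, e.g. "GB18030"
    return file.readText(Charset.forName(charsetName))
}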

@@ -0,0 +1,568 @@
/* GENERATED SOURCE. DO NOT MODIFY. */
/**
* ******************************************************************************
* Copyright (C) 2005-2014, International Business Machines Corporation and *
* others. All Rights Reserved. *
* ******************************************************************************
*/
package io.legado.app.utils.icu4j;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* <code>CharsetDetector</code> provides a facility for detecting the
* charset or encoding of character data in an unknown format.
* The input data can either be from an input stream or an array of bytes.
* The result of the detection operation is a list of possibly matching
* charsets, or, for simple use, you can just ask for a Java Reader that
* will work over the input data.
* <p/>
* Character set detection is at best an imprecise operation. The detection
* process will attempt to identify the charset that best matches the characteristics
* of the byte data, but the process is partly statistical in nature, and
* the results can not be guaranteed to always be correct.
* <p/>
* For best accuracy in charset detection, the input data should be primarily
* in a single language, and a minimum of a few hundred bytes worth of plain text
* in the language are needed. The detection process will attempt to
* ignore html or xml style markup that could otherwise obscure the content.
* <p/>
*
* @stable ICU 3.4
* @hide All android.icu classes are currently hidden
*/
public class CharsetDetector {
// Question: Should we have getters corresponding to the setters for input text
// and declared encoding?
// A thought: If we were to create our own type of Java Reader, we could defer
// figuring out an actual charset for data that starts out with too much English
// only ASCII until the user actually read through to something that didn't look
// like 7 bit English. If nothing else ever appeared, we would never need to
// actually choose the "real" charset. All assuming that the application just
// wants the data, and doesn't care about a char set name.
/**
* Constructor
*
* @stable ICU 3.4
*/
public CharsetDetector() {
}
/**
* Set the declared encoding for charset detection.
* The declared encoding of an input text is an encoding obtained
* from an http header or xml declaration or similar source that
* can be provided as additional information to the charset detector.
* A match between a declared encoding and a possible detected encoding
* will raise the quality of that detected encoding by a small delta,
* and will also appear as a "reason" for the match.
* <p/>
* A declared encoding that is incompatible with the input data being
* analyzed will not be added to the list of possible encodings.
*
* @param encoding The declared encoding
* @stable ICU 3.4
*/
public CharsetDetector setDeclaredEncoding(String encoding) {
fDeclaredEncoding = encoding;
return this;
}
/**
* Set the input text (byte) data whose charset is to be detected.
*
* @param in the input text of unknown encoding
* @return This CharsetDetector
* @stable ICU 3.4
*/
public CharsetDetector setText(byte[] in) {
fRawInput = in;
fRawLength = in.length;
return this;
}
private static final int kBufSize = 8000;
/**
* Set the input text (byte) data whose charset is to be detected.
* <p/>
* The input stream that supplies the character data must have markSupported()
* == true; the charset detection process will read a small amount of data,
* then return the stream to its original position via
* the InputStream.reset() operation. The exact amount that will
* be read depends on the characteristics of the data itself.
*
* @param in the input text of unknown encoding
* @return This CharsetDetector
* @stable ICU 3.4
*/
public CharsetDetector setText(InputStream in) throws IOException {
fInputStream = in;
fInputStream.mark(kBufSize);
fRawInput = new byte[kBufSize]; // Always make a new buffer because the
// previous one may have come from the caller,
// in which case we can't touch it.
fRawLength = 0;
int remainingLength = kBufSize;
while (remainingLength > 0) {
// read() may give data in smallish chunks, esp. for remote sources. Hence, this loop.
int bytesRead = fInputStream.read(fRawInput, fRawLength, remainingLength);
if (bytesRead <= 0) {
break;
}
fRawLength += bytesRead;
remainingLength -= bytesRead;
}
fInputStream.reset();
return this;
}
/**
* Return the charset that best matches the supplied input data.
* <p>
* Note though, that because the detection
* only looks at the start of the input data,
* there is a possibility that the returned charset will fail to handle
* the full set of input data.
* <p/>
* Raise an exception if
* <ul>
* <li>no charset appears to match the data.</li>
* <li>no input text has been provided</li>
* </ul>
*
* @return a CharsetMatch object representing the best matching charset, or
* <code>null</code> if there are no matches.
* @stable ICU 3.4
*/
public CharsetMatch detect() {
// TODO: A better implementation would be to copy the detect loop from
// detectAll(), and cut it short as soon as a match with a high confidence
// is found. This is something to be done later, after things are otherwise
// working.
CharsetMatch matches[] = detectAll();
if (matches == null || matches.length == 0) {
return null;
}
return matches[0];
}
/**
* Return an array of all charsets that appear to be plausible
* matches with the input data. The array is ordered with the
* best quality match first.
* <p/>
* Raise an exception if
* <ul>
* <li>no charsets appear to match the input data.</li>
* <li>no input text has been provided</li>
* </ul>
*
* @return An array of CharsetMatch objects representing possibly matching charsets.
* @stable ICU 3.4
*/
public CharsetMatch[] detectAll() {
ArrayList<CharsetMatch> matches = new ArrayList<CharsetMatch>();
MungeInput(); // Strip html markup, collect byte stats.
// Iterate over all possible charsets, remember all that
// give a match quality > 0.
for (int i = 0; i < ALL_CS_RECOGNIZERS.size(); i++) {
CSRecognizerInfo rcinfo = ALL_CS_RECOGNIZERS.get(i);
boolean active = (fEnabledRecognizers != null) ? fEnabledRecognizers[i] : rcinfo.isDefaultEnabled;
if (active) {
CharsetMatch m = rcinfo.recognizer.match(this);
if (m != null) {
matches.add(m);
}
}
}
Collections.sort(matches); // CharsetMatch compares on confidence
Collections.reverse(matches); // Put best match first.
CharsetMatch[] resultArray = new CharsetMatch[matches.size()];
resultArray = matches.toArray(resultArray);
return resultArray;
}
/**
* Autodetect the charset of an inputStream, and return a Java Reader
* to access the converted input data.
* <p/>
* This is a convenience method that is equivalent to
* <code>this.setDeclaredEncoding(declaredEncoding).setText(in).detect().getReader();</code>
* <p/>
* For the input stream that supplies the character data, markSupported()
* must be true; the charset detection will read a small amount of data,
* then return the stream to its original position via
* the InputStream.reset() operation. The exact amount that will
* be read depends on the characteristics of the data itself.
* <p/>
* Raise an exception if no charsets appear to match the input data.
*
* @param in The source of the byte data in the unknown charset.
* @param declaredEncoding A declared encoding for the data, if available,
* or null or an empty string if none is available.
* @stable ICU 3.4
*/
public Reader getReader(InputStream in, String declaredEncoding) {
fDeclaredEncoding = declaredEncoding;
try {
setText(in);
CharsetMatch match = detect();
if (match == null) {
return null;
}
return match.getReader();
} catch (IOException e) {
return null;
}
}
/**
* Autodetect the charset of an inputStream, and return a String
* containing the converted input data.
* <p/>
* This is a convenience method that is equivalent to
* <code>this.setDeclaredEncoding(declaredEncoding).setText(in).detect().getString();</code>
* <p/>
* Raise an exception if no charsets appear to match the input data.
*
* @param in The source of the byte data in the unknown charset.
* @param declaredEncoding A declared encoding for the data, if available,
* or null or an empty string if none is available.
* @stable ICU 3.4
*/
public String getString(byte[] in, String declaredEncoding) {
fDeclaredEncoding = declaredEncoding;
try {
setText(in);
CharsetMatch match = detect();
if (match == null) {
return null;
}
return match.getString(-1);
} catch (IOException e) {
return null;
}
}
/**
* Get the names of all charsets supported by <code>CharsetDetector</code> class.
* <p>
* <b>Note:</b> Multiple different charset encodings in the same family may use
* a single shared name in this implementation. For example, this method returns
* an array including "ISO-8859-1" (ISO Latin 1), but not including "windows-1252"
* (Windows Latin 1). However, the actual detection result could be "windows-1252"
* when the input data matches Latin 1 code points but also uses characters available
* only in "windows-1252".
*
* @return an array of the names of all charsets supported by
* <code>CharsetDetector</code> class.
* @stable ICU 3.4
*/
public static String[] getAllDetectableCharsets() {
String[] allCharsetNames = new String[ALL_CS_RECOGNIZERS.size()];
for (int i = 0; i < allCharsetNames.length; i++) {
allCharsetNames[i] = ALL_CS_RECOGNIZERS.get(i).recognizer.getName();
}
return allCharsetNames;
}
/**
* Test whether or not input filtering is enabled.
*
* @return <code>true</code> if input text will be filtered.
* @stable ICU 3.4
* @see #enableInputFilter
*/
public boolean inputFilterEnabled() {
return fStripTags;
}
/**
* Enable filtering of input text. If filtering is enabled,
* text within angle brackets ("<" and ">") will be removed
* before detection.
*
* @param filter <code>true</code> to enable input text filtering.
* @return The previous setting.
* @stable ICU 3.4
*/
public boolean enableInputFilter(boolean filter) {
boolean previous = fStripTags;
fStripTags = filter;
return previous;
}
/*
* MungeInput - after getting a set of raw input data to be analyzed, preprocess
* it by removing what appears to be html markup.
*/
private void MungeInput() {
int srci = 0;
int dsti = 0;
byte b;
boolean inMarkup = false;
int openTags = 0;
int badTags = 0;
//
// html / xml markup stripping.
// quick and dirty, not 100% accurate, but hopefully good enough, statistically.
// discard everything within < brackets >
// Count how many total '<' and illegal (nested) '<' occur, so we can make some
// guess as to whether the input was actually marked up at all.
if (fStripTags) {
for (srci = 0; srci < fRawLength && dsti < fInputBytes.length; srci++) {
b = fRawInput[srci];
if (b == (byte) '<') {
if (inMarkup) {
badTags++;
}
inMarkup = true;
openTags++;
}
if (!inMarkup) {
fInputBytes[dsti++] = b;
}
if (b == (byte) '>') {
inMarkup = false;
}
}
fInputLen = dsti;
}
//
// If it looks like this input wasn't marked up, or if it looks like it's
// essentially nothing but markup abandon the markup stripping.
// Detection will have to work on the unstripped input.
//
if (openTags < 5 || openTags / 5 < badTags ||
(fInputLen < 100 && fRawLength > 600)) {
int limit = fRawLength;
if (limit > kBufSize) {
limit = kBufSize;
}
for (srci = 0; srci < limit; srci++) {
fInputBytes[srci] = fRawInput[srci];
}
fInputLen = srci;
}
//
// Tally up the byte occurrence statistics.
// These are available for use by the various detectors.
//
Arrays.fill(fByteStats, (short) 0);
for (srci = 0; srci < fInputLen; srci++) {
int val = fInputBytes[srci] & 0x00ff;
fByteStats[val]++;
}
fC1Bytes = false;
for (int i = 0x80; i <= 0x9F; i += 1) {
if (fByteStats[i] != 0) {
fC1Bytes = true;
break;
}
}
}
/*
* The following items are accessed by individual CharsetRecognizers during
* the recognition process
*
*/
byte[] fInputBytes = // The text to be checked. Markup will have been
new byte[kBufSize]; // removed if appropriate.
int fInputLen; // Length of the byte data in fInputBytes.
short fByteStats[] = // byte frequency statistics for the input text.
new short[256]; // Value is percent, not absolute.
// Value is rounded up, so zero really means zero occurrences.
boolean fC1Bytes = // True if any bytes in the range 0x80 - 0x9F are in the input;
false;
String fDeclaredEncoding;
byte[] fRawInput; // Original, untouched input bytes.
// If user gave us a byte array, this is it.
// If user gave us a stream, it's read to a
// buffer here.
int fRawLength; // Length of data in fRawInput array.
InputStream fInputStream; // User's input stream, or null if the user
// gave us a byte array.
//
// Stuff private to CharsetDetector
//
private boolean fStripTags = // If true, setText() will strip tags from input text.
false;
private boolean[] fEnabledRecognizers; // If not null, the active set of charset recognizers has
// been changed from the default. The array index
// corresponds to ALL_CS_RECOGNIZERS. See setDetectableCharset().
private static class CSRecognizerInfo {
CharsetRecognizer recognizer;
boolean isDefaultEnabled;
CSRecognizerInfo(CharsetRecognizer recognizer, boolean isDefaultEnabled) {
this.recognizer = recognizer;
this.isDefaultEnabled = isDefaultEnabled;
}
}
/*
* List of recognizers for all charsets known to the implementation.
*/
private static final List<CSRecognizerInfo> ALL_CS_RECOGNIZERS;
static {
List<CSRecognizerInfo> list = new ArrayList<CSRecognizerInfo>();
list.add(new CSRecognizerInfo(new CharsetRecog_UTF8(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_Unicode.CharsetRecog_UTF_16_BE(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_Unicode.CharsetRecog_UTF_16_LE(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_Unicode.CharsetRecog_UTF_32_BE(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_Unicode.CharsetRecog_UTF_32_LE(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_mbcs.CharsetRecog_sjis(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_2022.CharsetRecog_2022JP(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_2022.CharsetRecog_2022CN(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_2022.CharsetRecog_2022KR(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_mbcs.CharsetRecog_euc.CharsetRecog_gb_18030(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_mbcs.CharsetRecog_euc.CharsetRecog_euc_jp(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_mbcs.CharsetRecog_euc.CharsetRecog_euc_kr(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_mbcs.CharsetRecog_big5(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_1(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_2(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_5_ru(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_6_ar(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_7_el(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_8_I_he(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_8_he(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_windows_1251(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_windows_1256(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_KOI8_R(), true));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_8859_9_tr(), true));
// IBM 420/424 recognizers are disabled by default
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_IBM424_he_rtl(), false));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_IBM424_he_ltr(), false));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_IBM420_ar_rtl(), false));
list.add(new CSRecognizerInfo(new CharsetRecog_sbcs.CharsetRecog_IBM420_ar_ltr(), false));
ALL_CS_RECOGNIZERS = Collections.unmodifiableList(list);
}
/**
* Get the names of charsets that can be recognized by this CharsetDetector instance.
*
* @return an array of the names of charsets that can be recognized by this CharsetDetector
* instance.
* <p>
* {@literal @}internal
* @deprecated This API is ICU internal only.
*/
@Deprecated
public String[] getDetectableCharsets() {
List<String> csnames = new ArrayList<String>(ALL_CS_RECOGNIZERS.size());
for (int i = 0; i < ALL_CS_RECOGNIZERS.size(); i++) {
CSRecognizerInfo rcinfo = ALL_CS_RECOGNIZERS.get(i);
boolean active = (fEnabledRecognizers == null) ? rcinfo.isDefaultEnabled : fEnabledRecognizers[i];
if (active) {
csnames.add(rcinfo.recognizer.getName());
}
}
return csnames.toArray(new String[csnames.size()]);
}
/**
* Enable or disable individual charset encoding.
* A name of charset encoding must be included in the names returned by
* {@link #getAllDetectableCharsets()}.
*
* @param encoding the name of charset encoding.
* @param enabled <code>true</code> to enable, or <code>false</code> to disable the
* charset encoding.
* @return A reference to this <code>CharsetDetector</code>.
* @throws IllegalArgumentException when the name of charset encoding is
* not supported.
* <p>
* {@literal @}internal
* @deprecated This API is ICU internal only.
*/
@Deprecated
public CharsetDetector setDetectableCharset(String encoding, boolean enabled) {
int modIdx = -1;
boolean isDefaultVal = false;
for (int i = 0; i < ALL_CS_RECOGNIZERS.size(); i++) {
CSRecognizerInfo csrinfo = ALL_CS_RECOGNIZERS.get(i);
if (csrinfo.recognizer.getName().equals(encoding)) {
modIdx = i;
isDefaultVal = (csrinfo.isDefaultEnabled == enabled);
break;
}
}
if (modIdx < 0) {
// No matching encoding found
throw new IllegalArgumentException("Invalid encoding: " + "\"" + encoding + "\"");
}
if (fEnabledRecognizers == null && !isDefaultVal) {
// Create an array storing the non default setting
fEnabledRecognizers = new boolean[ALL_CS_RECOGNIZERS.size()];
// Initialize the array with default info
for (int i = 0; i < ALL_CS_RECOGNIZERS.size(); i++) {
fEnabledRecognizers[i] = ALL_CS_RECOGNIZERS.get(i).isDefaultEnabled;
}
}
if (fEnabledRecognizers != null) {
fEnabledRecognizers[modIdx] = enabled;
}
return this;
}
}
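
A hedged Kotlin sketch of driving the detector API above directly; the input file and the declared-encoding hint are illustrative, not part of the commit.

import io.legado.app.utils.icu4j.CharsetDetector
import java.io.File

fun listPlausibleCharsets(path: String) {
    val bytes = File(path).readBytes()                 // hypothetical input
    val detector = CharsetDetector()
        .setDeclaredEncoding("UTF-8")                  // optional hint, e.g. from an HTTP header
        .setText(bytes)
    // Best single match, or null if nothing plausible was found.
    val best = detector.detect()
    println("best: ${best?.name} (confidence ${best?.confidence})")
    // All plausible matches, ordered best first.
    detector.detectAll().forEach { m ->
        println("${m.name} / ${m.language ?: "?"} -> ${m.confidence}")
    }
}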

@@ -0,0 +1,239 @@
/* GENERATED SOURCE. DO NOT MODIFY. */
/**
* ******************************************************************************
* Copyright (C) 2005-2012, International Business Machines Corporation and *
* others. All Rights Reserved. *
* ******************************************************************************
*/
package io.legado.app.utils.icu4j;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
/**
* This class represents a charset that has been identified by a CharsetDetector
* as a possible encoding for a set of input data. From an instance of this
* class, you can ask for a confidence level in the charset identification,
* or for Java Reader or String to access the original byte data in Unicode form.
* <p/>
* Instances of this class are created only by CharsetDetectors.
* <p/>
* Note: this class has a natural ordering that is inconsistent with equals.
* The natural ordering is based on the match confidence value.
*
* @stable ICU 3.4
* @hide All android.icu classes are currently hidden
*/
public class CharsetMatch implements Comparable<CharsetMatch> {
/**
* Create a java.io.Reader for reading the Unicode character data corresponding
* to the original byte data supplied to the Charset detect operation.
* <p/>
* CAUTION: if the source of the byte data was an InputStream, a Reader
* can be created for only one matching char set using this method. If more
* than one charset needs to be tried, the caller will need to reset
* the InputStream and create InputStreamReaders itself, based on the charset name.
*
* @return the Reader for the Unicode character data.
* @stable ICU 3.4
*/
public Reader getReader() {
InputStream inputStream = fInputStream;
if (inputStream == null) {
inputStream = new ByteArrayInputStream(fRawInput, 0, fRawLength);
}
try {
inputStream.reset();
return new InputStreamReader(inputStream, getName());
} catch (IOException e) {
return null;
}
}
/**
* Create a Java String from Unicode character data corresponding
* to the original byte data supplied to the Charset detect operation.
*
* @return a String created from the converted input data.
* @stable ICU 3.4
*/
public String getString() throws IOException {
return getString(-1);
}
/**
* Create a Java String from Unicode character data corresponding
* to the original byte data supplied to the Charset detect operation.
* The length of the returned string is limited to the specified size;
* the string will be truncated to this length if necessary. A limit value of
* zero or less is ignored, and treated as no limit.
*
* @param maxLength The maximum length of the String to be created when the
* source of the data is an input stream, or -1 for
* unlimited length.
* @return a String created from the converted input data.
* @stable ICU 3.4
*/
public String getString(int maxLength) throws IOException {
String result = null;
if (fInputStream != null) {
StringBuilder sb = new StringBuilder();
char[] buffer = new char[1024];
Reader reader = getReader();
int max = maxLength < 0 ? Integer.MAX_VALUE : maxLength;
int bytesRead = 0;
while ((bytesRead = reader.read(buffer, 0, Math.min(max, 1024))) >= 0) {
sb.append(buffer, 0, bytesRead);
max -= bytesRead;
}
reader.close();
return sb.toString();
} else {
String name = getName();
/*
* getName() may return a name with a suffix '_rtl' or '_ltr'. This cannot
* be used to open a charset (e.g. IBM424_rtl). The ending '_rtl' or '_ltr'
* must be stripped off before creating the string.
*/
int startSuffix = name.indexOf("_rtl") < 0 ? name.indexOf("_ltr") : name.indexOf("_rtl");
if (startSuffix > 0) {
name = name.substring(0, startSuffix);
}
result = new String(fRawInput, name);
}
return result;
}
/**
* Get an indication of the confidence in the charset detected.
* Confidence values range from 0-100, with larger numbers indicating
* a better match of the input data to the characteristics of the
* charset.
*
* @return the confidence in the charset match
* @stable ICU 3.4
*/
public int getConfidence() {
return fConfidence;
}
/**
* Get the name of the detected charset.
* The name will be one that can be used with other APIs on the
* platform that accept charset names. It is the "Canonical name"
* as defined by the class java.nio.charset.Charset; for
* charsets that are registered with the IANA charset registry,
* this is the MIME-preferred registered name.
*
* @return The name of the charset.
* @stable ICU 3.4
* @see java.nio.charset.Charset
* @see InputStreamReader
*/
public String getName() {
return fCharsetName;
}
/**
* Get the ISO code for the language of the detected charset.
*
* @return The ISO code for the language or <code>null</code> if the language cannot be determined.
* @stable ICU 3.4
*/
public String getLanguage() {
return fLang;
}
/**
* Compare to other CharsetMatch objects.
* Comparison is based on the match confidence value, which
* allows CharsetDetector.detectAll() to order its results.
*
* @param other the CharsetMatch object to compare against.
* @return a negative integer, zero, or a positive integer as the
* confidence level of this CharsetMatch
* is less than, equal to, or greater than that of
* the argument.
* @throws ClassCastException if the argument is not a CharsetMatch.
* @stable ICU 4.4
*/
public int compareTo(CharsetMatch other) {
int compareResult = 0;
if (this.fConfidence > other.fConfidence) {
compareResult = 1;
} else if (this.fConfidence < other.fConfidence) {
compareResult = -1;
}
return compareResult;
}
/*
* Constructor. Implementation internal
*/
CharsetMatch(CharsetDetector det, CharsetRecognizer rec, int conf) {
fConfidence = conf;
// The references to the original application input data must be copied out
// of the charset recognizer to here, in case the application resets the
// recognizer before using this CharsetMatch.
if (det.fInputStream == null) {
// We only want the existing input byte data if it came straight from the user,
// not if is just the head of a stream.
fRawInput = det.fRawInput;
fRawLength = det.fRawLength;
}
fInputStream = det.fInputStream;
fCharsetName = rec.getName();
fLang = rec.getLanguage();
}
/*
* Constructor. Implementation internal
*/
CharsetMatch(CharsetDetector det, CharsetRecognizer rec, int conf, String csName, String lang) {
fConfidence = conf;
// The references to the original application input data must be copied out
// of the charset recognizer to here, in case the application resets the
// recognizer before using this CharsetMatch.
if (det.fInputStream == null) {
// We only want the existing input byte data if it came straight from the user,
// not if is just the head of a stream.
fRawInput = det.fRawInput;
fRawLength = det.fRawLength;
}
fInputStream = det.fInputStream;
fCharsetName = csName;
fLang = lang;
}
//
// Private Data
//
private int fConfidence;
private byte[] fRawInput = null; // Original, untouched input bytes.
// If user gave us a byte array, this is it.
private int fRawLength; // Length of data in fRawInput array.
private InputStream fInputStream = null; // User's input stream, or null if the user
// gave us a byte array.
private String fCharsetName; // The name of the charset this CharsetMatch
// represents. Filled in by the recognizer.
private String fLang; // The language, if one was determined by
// the recognizer during the detect operation.
}

@@ -0,0 +1,164 @@
/* GENERATED SOURCE. DO NOT MODIFY. */
/*
*******************************************************************************
* Copyright (C) 2005 - 2012, International Business Machines Corporation and *
* others. All Rights Reserved. *
*******************************************************************************
*/
package io.legado.app.utils.icu4j;
/**
* Class CharsetRecog_2022, part of the ICU charset detection implementation.
* This is a superclass for the individual detectors for
* each of the detectable members of the ISO 2022 family
* of encodings.
* <p>
* The separate classes are nested within this class.
*/
abstract class CharsetRecog_2022 extends CharsetRecognizer {
/**
* Matching function shared among the 2022 detectors JP, CN and KR
* Counts up the number of legal and unrecognized escape sequences in
* the sample of text, and computes a score based on the total number &
* the proportion that fit the encoding.
*
* @param text the byte buffer containing text to analyse
* @param textLen the length of the text, in bytes.
* @param escapeSequences the byte escape sequences to test for.
* @return match quality, in the range of 0-100.
*/
int match(byte[] text, int textLen, byte[][] escapeSequences) {
int i, j;
int escN;
int hits = 0;
int misses = 0;
int shifts = 0;
int quality;
scanInput:
for (i = 0; i < textLen; i++) {
if (text[i] == 0x1b) {
checkEscapes:
for (escN = 0; escN < escapeSequences.length; escN++) {
byte[] seq = escapeSequences[escN];
if ((textLen - i) < seq.length) {
continue checkEscapes;
}
for (j = 1; j < seq.length; j++) {
if (seq[j] != text[i + j]) {
continue checkEscapes;
}
}
hits++;
i += seq.length - 1;
continue scanInput;
}
misses++;
}
if (text[i] == 0x0e || text[i] == 0x0f) {
// Shift in/out
shifts++;
}
}
if (hits == 0) {
return 0;
}
//
// Initial quality is based on the relative proportion of recognized vs.
// unrecognized escape sequences.
// All good: quality = 100;
// half or less good: quality = 0;
// linear in between.
quality = (100 * hits - 100 * misses) / (hits + misses);
// Back off quality if there were too few escape sequences seen.
// Include shifts in this computation, so that KR does not get penalized
// for having only a single Escape sequence, but many shifts.
if (hits + shifts < 5) {
quality -= (5 - (hits + shifts)) * 10;
}
if (quality < 0) {
quality = 0;
}
return quality;
}
static class CharsetRecog_2022JP extends CharsetRecog_2022 {
private byte[][] escapeSequences = {
{0x1b, 0x24, 0x28, 0x43}, // KS X 1001:1992
{0x1b, 0x24, 0x28, 0x44}, // JIS X 212-1990
{0x1b, 0x24, 0x40}, // JIS C 6226-1978
{0x1b, 0x24, 0x41}, // GB 2312-80
{0x1b, 0x24, 0x42}, // JIS X 208-1983
{0x1b, 0x26, 0x40}, // JIS X 208 1990, 1997
{0x1b, 0x28, 0x42}, // ASCII
{0x1b, 0x28, 0x48}, // JIS-Roman
{0x1b, 0x28, 0x49}, // Half-width katakana
{0x1b, 0x28, 0x4a}, // JIS-Roman
{0x1b, 0x2e, 0x41}, // ISO 8859-1
{0x1b, 0x2e, 0x46} // ISO 8859-7
};
String getName() {
return "ISO-2022-JP";
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det.fInputBytes, det.fInputLen, escapeSequences);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
}
static class CharsetRecog_2022KR extends CharsetRecog_2022 {
private byte[][] escapeSequences = {
{0x1b, 0x24, 0x29, 0x43}
};
String getName() {
return "ISO-2022-KR";
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det.fInputBytes, det.fInputLen, escapeSequences);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
}
static class CharsetRecog_2022CN extends CharsetRecog_2022 {
private byte[][] escapeSequences = {
{0x1b, 0x24, 0x29, 0x41}, // GB 2312-80
{0x1b, 0x24, 0x29, 0x47}, // CNS 11643-1992 Plane 1
{0x1b, 0x24, 0x2A, 0x48}, // CNS 11643-1992 Plane 2
{0x1b, 0x24, 0x29, 0x45}, // ISO-IR-165
{0x1b, 0x24, 0x2B, 0x49}, // CNS 11643-1992 Plane 3
{0x1b, 0x24, 0x2B, 0x4A}, // CNS 11643-1992 Plane 4
{0x1b, 0x24, 0x2B, 0x4B}, // CNS 11643-1992 Plane 5
{0x1b, 0x24, 0x2B, 0x4C}, // CNS 11643-1992 Plane 6
{0x1b, 0x24, 0x2B, 0x4D}, // CNS 11643-1992 Plane 7
{0x1b, 0x4e}, // SS2
{0x1b, 0x4f}, // SS3
};
String getName() {
return "ISO-2022-CN";
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det.fInputBytes, det.fInputLen, escapeSequences);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
}
}
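
The escape-sequence scoring in match() above reduces to a small formula; this Kotlin re-statement is illustrative only (the function name is made up).

// Quality from recognized (hits) vs. unrecognized (misses) ISO-2022 escape sequences:
// 100 when all are recognized, 0 when half or fewer are, linear in between,
// with a back-off when fewer than 5 escapes/shift codes were seen at all.
fun iso2022Quality(hits: Int, misses: Int, shifts: Int): Int {
    if (hits == 0) return 0
    var quality = (100 * hits - 100 * misses) / (hits + misses)
    if (hits + shifts < 5) quality -= (5 - (hits + shifts)) * 10
    return quality.coerceAtLeast(0)
}

// Example: 4 recognized escapes, 1 unrecognized, no shift codes
// -> (400 - 100) / 5 = 60, minus 10 for seeing only 4 escapes = 50.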

@@ -0,0 +1,97 @@
/* GENERATED SOURCE. DO NOT MODIFY. */
/**
* ******************************************************************************
* Copyright (C) 2005 - 2014, International Business Machines Corporation and *
* others. All Rights Reserved. *
* ******************************************************************************
*/
package io.legado.app.utils.icu4j;
/**
* Charset recognizer for UTF-8
*/
class CharsetRecog_UTF8 extends CharsetRecognizer {
String getName() {
return "UTF-8";
}
/* (non-Javadoc)
* @see com.ibm.icu.text.CharsetRecognizer#match(com.ibm.icu.text.CharsetDetector)
*/
CharsetMatch match(CharsetDetector det) {
boolean hasBOM = false;
int numValid = 0;
int numInvalid = 0;
byte input[] = det.fRawInput;
int i;
int trailBytes = 0;
int confidence;
if (det.fRawLength >= 3 &&
(input[0] & 0xFF) == 0xef && (input[1] & 0xFF) == 0xbb && (input[2] & 0xFF) == 0xbf) {
hasBOM = true;
}
// Scan for multi-byte sequences
for (i = 0; i < det.fRawLength; i++) {
int b = input[i];
if ((b & 0x80) == 0) {
continue; // ASCII
}
// Hi bit on char found. Figure out how long the sequence should be
if ((b & 0x0e0) == 0x0c0) {
trailBytes = 1;
} else if ((b & 0x0f0) == 0x0e0) {
trailBytes = 2;
} else if ((b & 0x0f8) == 0xf0) {
trailBytes = 3;
} else {
numInvalid++;
continue;
}
// Verify that we've got the right number of trail bytes in the sequence
for (; ; ) {
i++;
if (i >= det.fRawLength) {
break;
}
b = input[i];
if ((b & 0xc0) != 0x080) {
numInvalid++;
break;
}
if (--trailBytes == 0) {
numValid++;
break;
}
}
}
// Cook up some sort of confidence score, based on presence of a BOM
// and the existence of valid and/or invalid multi-byte sequences.
confidence = 0;
if (hasBOM && numInvalid == 0) {
confidence = 100;
} else if (hasBOM && numValid > numInvalid * 10) {
confidence = 80;
} else if (numValid > 3 && numInvalid == 0) {
confidence = 100;
} else if (numValid > 0 && numInvalid == 0) {
confidence = 80;
} else if (numValid == 0 && numInvalid == 0) {
// Plain ASCII. Confidence must be > 10, it's more likely than UTF-16, which
// accepts ASCII with confidence = 10.
// TODO: add plain ASCII as an explicitly detected type.
confidence = 15;
} else if (numValid > numInvalid * 10) {
// Probably corrupt UTF-8 data. Valid sequences aren't likely by chance.
confidence = 25;
}
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
}
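
The lead-byte tests above follow the standard UTF-8 length rules; a Kotlin sketch of the same classification (illustrative, not code from the commit).

// Number of continuation bytes expected after a UTF-8 lead byte, or -1 for an invalid lead.
fun utf8TrailBytes(b: Int): Int = when {
    b and 0x80 == 0x00 -> 0   // 0xxxxxxx: ASCII, no trail bytes
    b and 0xE0 == 0xC0 -> 1   // 110xxxxx: start of a 2-byte sequence
    b and 0xF0 == 0xE0 -> 2   // 1110xxxx: start of a 3-byte sequence
    b and 0xF8 == 0xF0 -> 3   // 11110xxx: start of a 4-byte sequence
    else -> -1                // 10xxxxxx (continuation) or invalid lead byte
}
// Each continuation byte must itself match 10xxxxxx, i.e. (b and 0xC0) == 0x80.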

@@ -0,0 +1,186 @@
/* GENERATED SOURCE. DO NOT MODIFY. */
/*
*******************************************************************************
* Copyright (C) 1996-2013, International Business Machines Corporation and *
* others. All Rights Reserved. *
*******************************************************************************
*
*/
package io.legado.app.utils.icu4j;
/**
* This class matches UTF-16 and UTF-32, both big- and little-endian. The
* BOM will be used if it is present.
*/
abstract class CharsetRecog_Unicode extends CharsetRecognizer {
/* (non-Javadoc)
* @see com.ibm.icu.text.CharsetRecognizer#getName()
*/
abstract String getName();
/* (non-Javadoc)
* @see com.ibm.icu.text.CharsetRecognizer#match(com.ibm.icu.text.CharsetDetector)
*/
abstract CharsetMatch match(CharsetDetector det);
static int codeUnit16FromBytes(byte hi, byte lo) {
return ((hi & 0xff) << 8) | (lo & 0xff);
}
// UTF-16 confidence calculation. Very simple minded, but better than nothing.
// Any 8 bit non-control characters bump the confidence up. These have a zero high byte,
// and are very likely to be UTF-16, although they could also be part of a UTF-32 code.
// NULs are a contra-indication, they will appear commonly if the actual encoding is UTF-32.
// NULs should be rare in actual text.
static int adjustConfidence(int codeUnit, int confidence) {
if (codeUnit == 0) {
confidence -= 10;
} else if ((codeUnit >= 0x20 && codeUnit <= 0xff) || codeUnit == 0x0a) {
confidence += 10;
}
if (confidence < 0) {
confidence = 0;
} else if (confidence > 100) {
confidence = 100;
}
return confidence;
}
static class CharsetRecog_UTF_16_BE extends CharsetRecog_Unicode {
String getName() {
return "UTF-16BE";
}
CharsetMatch match(CharsetDetector det) {
byte[] input = det.fRawInput;
int confidence = 10;
int bytesToCheck = Math.min(input.length, 30);
for (int charIndex = 0; charIndex < bytesToCheck - 1; charIndex += 2) {
int codeUnit = codeUnit16FromBytes(input[charIndex], input[charIndex + 1]);
if (charIndex == 0 && codeUnit == 0xFEFF) {
confidence = 100;
break;
}
confidence = adjustConfidence(codeUnit, confidence);
if (confidence == 0 || confidence == 100) {
break;
}
}
if (bytesToCheck < 4 && confidence < 100) {
confidence = 0;
}
if (confidence > 0) {
return new CharsetMatch(det, this, confidence);
}
return null;
}
}
static class CharsetRecog_UTF_16_LE extends CharsetRecog_Unicode {
String getName() {
return "UTF-16LE";
}
CharsetMatch match(CharsetDetector det) {
byte[] input = det.fRawInput;
int confidence = 10;
int bytesToCheck = Math.min(input.length, 30);
for (int charIndex = 0; charIndex < bytesToCheck - 1; charIndex += 2) {
int codeUnit = codeUnit16FromBytes(input[charIndex + 1], input[charIndex]);
if (charIndex == 0 && codeUnit == 0xFEFF) {
confidence = 100;
break;
}
confidence = adjustConfidence(codeUnit, confidence);
if (confidence == 0 || confidence == 100) {
break;
}
}
if (bytesToCheck < 4 && confidence < 100) {
confidence = 0;
}
if (confidence > 0) {
return new CharsetMatch(det, this, confidence);
}
return null;
}
}
static abstract class CharsetRecog_UTF_32 extends CharsetRecog_Unicode {
abstract int getChar(byte[] input, int index);
abstract String getName();
CharsetMatch match(CharsetDetector det) {
byte[] input = det.fRawInput;
int limit = (det.fRawLength / 4) * 4;
int numValid = 0;
int numInvalid = 0;
boolean hasBOM = false;
int confidence = 0;
if (limit == 0) {
return null;
}
if (getChar(input, 0) == 0x0000FEFF) {
hasBOM = true;
}
for (int i = 0; i < limit; i += 4) {
int ch = getChar(input, i);
if (ch < 0 || ch >= 0x10FFFF || (ch >= 0xD800 && ch <= 0xDFFF)) {
numInvalid += 1;
} else {
numValid += 1;
}
}
// Cook up some sort of confidence score, based on presence of a BOM
// and the existence of valid and/or invalid multi-byte sequences.
if (hasBOM && numInvalid == 0) {
confidence = 100;
} else if (hasBOM && numValid > numInvalid * 10) {
confidence = 80;
} else if (numValid > 3 && numInvalid == 0) {
confidence = 100;
} else if (numValid > 0 && numInvalid == 0) {
confidence = 80;
} else if (numValid > numInvalid * 10) {
// Probably corrupt UTF-32BE data. Valid sequences aren't likely by chance.
confidence = 25;
}
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
}
static class CharsetRecog_UTF_32_BE extends CharsetRecog_UTF_32 {
int getChar(byte[] input, int index) {
return (input[index + 0] & 0xFF) << 24 | (input[index + 1] & 0xFF) << 16 |
(input[index + 2] & 0xFF) << 8 | (input[index + 3] & 0xFF);
}
String getName() {
return "UTF-32BE";
}
}
static class CharsetRecog_UTF_32_LE extends CharsetRecog_UTF_32 {
int getChar(byte[] input, int index) {
return (input[index + 3] & 0xFF) << 24 | (input[index + 2] & 0xFF) << 16 |
(input[index + 1] & 0xFF) << 8 | (input[index + 0] & 0xFF);
}
String getName() {
return "UTF-32LE";
}
}
}
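
A Kotlin sketch of the UTF-16BE heuristic above, mirroring the BOM check and the per-code-unit confidence adjustment (the function name is illustrative).

fun utf16beConfidence(input: ByteArray): Int {
    var confidence = 10
    val bytesToCheck = minOf(input.size, 30)
    var i = 0
    while (i < bytesToCheck - 1) {
        // Big-endian 16-bit code unit from two bytes.
        val unit = ((input[i].toInt() and 0xFF) shl 8) or (input[i + 1].toInt() and 0xFF)
        if (i == 0 && unit == 0xFEFF) {            // BOM: certain match
            confidence = 100
            break
        }
        confidence = when {
            unit == 0 -> confidence - 10           // NULs hint at UTF-32 instead
            unit in 0x20..0xFF || unit == 0x0A -> confidence + 10
            else -> confidence
        }.coerceIn(0, 100)
        if (confidence == 0 || confidence == 100) break
        i += 2
    }
    // Too little data to say anything unless a BOM was seen.
    return if (bytesToCheck < 4 && confidence < 100) 0 else confidence
}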

@@ -0,0 +1,540 @@
/* GENERATED SOURCE. DO NOT MODIFY. */
/*
****************************************************************************
* Copyright (C) 2005-2012, International Business Machines Corporation and *
* others. All Rights Reserved. *
****************************************************************************
*
*/
package io.legado.app.utils.icu4j;
import java.util.Arrays;
/**
* CharsetRecognizer implementation for Asian - double or multi-byte - charsets.
* Match is determined mostly by the input data adhering to the
* encoding scheme for the charset, and, optionally,
* frequency-of-occurrence of characters.
* <p/>
* Instances of this class are singletons, one per encoding
* being recognized. They are created in the main
* CharsetDetector class and kept in the global list of available
* encodings to be checked. The specific encoding being recognized
* is determined by subclass.
*/
abstract class CharsetRecog_mbcs extends CharsetRecognizer {
/**
* Get the IANA name of this charset.
*
* @return the charset name.
*/
abstract String getName();
/**
* Test the match of this charset with the input text data
* which is obtained via the CharsetDetector object.
*
* @param det The CharsetDetector, which contains the input text
* to be checked for being in this charset.
* @return Two values packed into one int (Damn java, anyhow)
* <br/>
* bits 0-7: the match confidence, ranging from 0-100
* <br/>
* bits 8-15: The match reason, an enum-like value.
*/
int match(CharsetDetector det, int[] commonChars) {
@SuppressWarnings("unused")
int singleByteCharCount = 0; //TODO Do we really need this?
int doubleByteCharCount = 0;
int commonCharCount = 0;
int badCharCount = 0;
int totalCharCount = 0;
int confidence = 0;
iteratedChar iter = new iteratedChar();
detectBlock:
{
for (iter.reset(); nextChar(iter, det); ) {
totalCharCount++;
if (iter.error) {
badCharCount++;
} else {
long cv = iter.charValue & 0xFFFFFFFFL;
if (cv <= 0xff) {
singleByteCharCount++;
} else {
doubleByteCharCount++;
if (commonChars != null) {
// NOTE: This assumes that there are no 4-byte common chars.
if (Arrays.binarySearch(commonChars, (int) cv) >= 0) {
commonCharCount++;
}
}
}
}
if (badCharCount >= 2 && badCharCount * 5 >= doubleByteCharCount) {
// Bail out early if the byte data is not matching the encoding scheme.
break detectBlock;
}
}
if (doubleByteCharCount <= 10 && badCharCount == 0) {
// Not many multi-byte chars.
if (doubleByteCharCount == 0 && totalCharCount < 10) {
// There weren't any multibyte sequences, and there was a low density of non-ASCII single bytes.
// We don't have enough data to have any confidence.
// Statistical analysis of single byte non-ASCII characters would probably help here.
confidence = 0;
} else {
// ASCII or ISO file? It's probably not our encoding,
// but is not incompatible with our encoding, so don't give it a zero.
confidence = 10;
}
break detectBlock;
}
//
// No match if there are too many characters that don't fit the encoding scheme.
// (should we have zero tolerance for these?)
//
if (doubleByteCharCount < 20 * badCharCount) {
confidence = 0;
break detectBlock;
}
if (commonChars == null) {
// We have no statistics on frequently occurring characters.
// Assess confidence purely on having a reasonable number of
// multi-byte characters (the more the better).
confidence = 30 + doubleByteCharCount - 20 * badCharCount;
if (confidence > 100) {
confidence = 100;
}
} else {
//
// Frequency of occurrence statistics exist.
//
double maxVal = Math.log((float) doubleByteCharCount / 4);
double scaleFactor = 90.0 / maxVal;
confidence = (int) (Math.log(commonCharCount + 1) * scaleFactor + 10);
confidence = Math.min(confidence, 100);
}
} // end of detectBlock:
return confidence;
}
// "Character" iterated character class.
// Recognizers for specific mbcs encodings make their "characters" available
// by providing a nextChar() function that fills in an instance of iteratedChar
// with the next char from the input.
// The returned characters are not converted to Unicode, but remain as the raw
// bytes (concatenated into an int) from the codepage data.
//
// For Asian charsets, use the raw input rather than the input that has been
// stripped of markup. Detection only considers multi-byte chars, effectively
// stripping markup anyway, and double byte chars do occur in markup too.
//
static class iteratedChar {
int charValue = 0; // 1-4 bytes from the raw input data
int index = 0;
int nextIndex = 0;
boolean error = false;
boolean done = false;
void reset() {
charValue = 0;
index = -1;
nextIndex = 0;
error = false;
done = false;
}
int nextByte(CharsetDetector det) {
if (nextIndex >= det.fRawLength) {
done = true;
return -1;
}
int byteValue = (int) det.fRawInput[nextIndex++] & 0x00ff;
return byteValue;
}
}
/**
* Get the next character (however many bytes it is) from the input data
* Subclasses for specific charset encodings must implement this function
* to get characters according to the rules of their encoding scheme.
* <p>
* This function is not a method of class iteratedChar only because
* that would require a lot of extra derived classes, which is awkward.
*
* @param it The iteratedChar "struct" into which the returned char is placed.
* @param det The charset detector, which is needed to get at the input byte data
* being iterated over.
* @return True if a character was returned, false at end of input.
*/
abstract boolean nextChar(iteratedChar it, CharsetDetector det);
/**
* Shift-JIS charset recognizer.
*/
static class CharsetRecog_sjis extends CharsetRecog_mbcs {
static int[] commonChars =
// TODO: This set of data comes from the character frequency-
// of-occurrence analysis tool. The data needs to be moved
// into a resource and loaded from there.
{0x8140, 0x8141, 0x8142, 0x8145, 0x815b, 0x8169, 0x816a, 0x8175, 0x8176, 0x82a0,
0x82a2, 0x82a4, 0x82a9, 0x82aa, 0x82ab, 0x82ad, 0x82af, 0x82b1, 0x82b3, 0x82b5,
0x82b7, 0x82bd, 0x82be, 0x82c1, 0x82c4, 0x82c5, 0x82c6, 0x82c8, 0x82c9, 0x82cc,
0x82cd, 0x82dc, 0x82e0, 0x82e7, 0x82e8, 0x82e9, 0x82ea, 0x82f0, 0x82f1, 0x8341,
0x8343, 0x834e, 0x834f, 0x8358, 0x835e, 0x8362, 0x8367, 0x8375, 0x8376, 0x8389,
0x838a, 0x838b, 0x838d, 0x8393, 0x8e96, 0x93fa, 0x95aa};
boolean nextChar(iteratedChar it, CharsetDetector det) {
it.index = it.nextIndex;
it.error = false;
int firstByte;
firstByte = it.charValue = it.nextByte(det);
if (firstByte < 0) {
return false;
}
if (firstByte <= 0x7f || (firstByte > 0xa0 && firstByte <= 0xdf)) {
return true;
}
int secondByte = it.nextByte(det);
if (secondByte < 0) {
return false;
}
it.charValue = (firstByte << 8) | secondByte;
if (!((secondByte >= 0x40 && secondByte <= 0x7f) || (secondByte >= 0x80 && secondByte <= 0xff))) {
// Illegal second byte value.
it.error = true;
}
return true;
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det, commonChars);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
String getName() {
return "Shift_JIS";
}
public String getLanguage() {
return "ja";
}
}
/**
* Big5 charset recognizer.
*/
static class CharsetRecog_big5 extends CharsetRecog_mbcs {
static int[] commonChars =
// TODO: This set of data comes from the character frequency-
// of-occurrence analysis tool. The data needs to be moved
// into a resource and loaded from there.
{0xa140, 0xa141, 0xa142, 0xa143, 0xa147, 0xa149, 0xa175, 0xa176, 0xa440, 0xa446,
0xa447, 0xa448, 0xa451, 0xa454, 0xa457, 0xa464, 0xa46a, 0xa46c, 0xa477, 0xa4a3,
0xa4a4, 0xa4a7, 0xa4c1, 0xa4ce, 0xa4d1, 0xa4df, 0xa4e8, 0xa4fd, 0xa540, 0xa548,
0xa558, 0xa569, 0xa5cd, 0xa5e7, 0xa657, 0xa661, 0xa662, 0xa668, 0xa670, 0xa6a8,
0xa6b3, 0xa6b9, 0xa6d3, 0xa6db, 0xa6e6, 0xa6f2, 0xa740, 0xa751, 0xa759, 0xa7da,
0xa8a3, 0xa8a5, 0xa8ad, 0xa8d1, 0xa8d3, 0xa8e4, 0xa8fc, 0xa9c0, 0xa9d2, 0xa9f3,
0xaa6b, 0xaaba, 0xaabe, 0xaacc, 0xaafc, 0xac47, 0xac4f, 0xacb0, 0xacd2, 0xad59,
0xaec9, 0xafe0, 0xb0ea, 0xb16f, 0xb2b3, 0xb2c4, 0xb36f, 0xb44c, 0xb44e, 0xb54c,
0xb5a5, 0xb5bd, 0xb5d0, 0xb5d8, 0xb671, 0xb7ed, 0xb867, 0xb944, 0xbad8, 0xbb44,
0xbba1, 0xbdd1, 0xc2c4, 0xc3b9, 0xc440, 0xc45f};
boolean nextChar(iteratedChar it, CharsetDetector det) {
it.index = it.nextIndex;
it.error = false;
int firstByte;
firstByte = it.charValue = it.nextByte(det);
if (firstByte < 0) {
return false;
}
if (firstByte <= 0x7f || firstByte == 0xff) {
// single byte character.
return true;
}
int secondByte = it.nextByte(det);
if (secondByte < 0) {
return false;
}
it.charValue = (it.charValue << 8) | secondByte;
if (secondByte < 0x40 ||
secondByte == 0x7f ||
secondByte == 0xff) {
it.error = true;
}
return true;
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det, commonChars);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
String getName() {
return "Big5";
}
public String getLanguage() {
return "zh";
}
}
/**
* EUC charset recognizers. One abstract class that provides the common function
* for getting the next character according to the EUC encoding scheme,
* and nested derived classes for EUC_KR, EUC_JP, EUC_CN.
*/
abstract static class CharsetRecog_euc extends CharsetRecog_mbcs {
/*
* (non-Javadoc)
* Get the next character value for EUC based encodings.
* Character "value" is simply the raw bytes that make up the character
* packed into an int.
*/
boolean nextChar(iteratedChar it, CharsetDetector det) {
it.index = it.nextIndex;
it.error = false;
int firstByte = 0;
int secondByte = 0;
int thirdByte = 0;
//int fourthByte = 0;
buildChar:
{
firstByte = it.charValue = it.nextByte(det);
if (firstByte < 0) {
// Ran off the end of the input data
it.done = true;
break buildChar;
}
if (firstByte <= 0x8d) {
// single byte char
break buildChar;
}
secondByte = it.nextByte(det);
it.charValue = (it.charValue << 8) | secondByte;
if (firstByte >= 0xA1 && firstByte <= 0xfe) {
// Two byte Char
if (secondByte < 0xa1) {
it.error = true;
}
break buildChar;
}
if (firstByte == 0x8e) {
// Code Set 2.
// In EUC-JP, total char size is 2 bytes, only one byte of actual char value.
// In EUC-TW, total char size is 4 bytes, three bytes contribute to char value.
// We don't know which we've got.
// Treat it like EUC-JP. If the data really was EUC-TW, the following two
// bytes will look like a well formed 2 byte char.
if (secondByte < 0xa1) {
it.error = true;
}
break buildChar;
}
if (firstByte == 0x8f) {
// Code set 3.
// Three byte total char size, two bytes of actual char value.
thirdByte = it.nextByte(det);
it.charValue = (it.charValue << 8) | thirdByte;
if (thirdByte < 0xa1) {
it.error = true;
}
}
}
return (it.done == false);
}
/**
* The charset recognizer for EUC-JP. A singleton instance of this class
* is created and kept by the public CharsetDetector class
*/
static class CharsetRecog_euc_jp extends CharsetRecog_euc {
static int[] commonChars =
// TODO: This set of data comes from the character frequency-
// of-occurrence analysis tool. The data needs to be moved
// into a resource and loaded from there.
{0xa1a1, 0xa1a2, 0xa1a3, 0xa1a6, 0xa1bc, 0xa1ca, 0xa1cb, 0xa1d6, 0xa1d7, 0xa4a2,
0xa4a4, 0xa4a6, 0xa4a8, 0xa4aa, 0xa4ab, 0xa4ac, 0xa4ad, 0xa4af, 0xa4b1, 0xa4b3,
0xa4b5, 0xa4b7, 0xa4b9, 0xa4bb, 0xa4bd, 0xa4bf, 0xa4c0, 0xa4c1, 0xa4c3, 0xa4c4,
0xa4c6, 0xa4c7, 0xa4c8, 0xa4c9, 0xa4ca, 0xa4cb, 0xa4ce, 0xa4cf, 0xa4d0, 0xa4de,
0xa4df, 0xa4e1, 0xa4e2, 0xa4e4, 0xa4e8, 0xa4e9, 0xa4ea, 0xa4eb, 0xa4ec, 0xa4ef,
0xa4f2, 0xa4f3, 0xa5a2, 0xa5a3, 0xa5a4, 0xa5a6, 0xa5a7, 0xa5aa, 0xa5ad, 0xa5af,
0xa5b0, 0xa5b3, 0xa5b5, 0xa5b7, 0xa5b8, 0xa5b9, 0xa5bf, 0xa5c3, 0xa5c6, 0xa5c7,
0xa5c8, 0xa5c9, 0xa5cb, 0xa5d0, 0xa5d5, 0xa5d6, 0xa5d7, 0xa5de, 0xa5e0, 0xa5e1,
0xa5e5, 0xa5e9, 0xa5ea, 0xa5eb, 0xa5ec, 0xa5ed, 0xa5f3, 0xb8a9, 0xb9d4, 0xbaee,
0xbbc8, 0xbef0, 0xbfb7, 0xc4ea, 0xc6fc, 0xc7bd, 0xcab8, 0xcaf3, 0xcbdc, 0xcdd1};
String getName() {
return "EUC-JP";
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det, commonChars);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
public String getLanguage() {
return "ja";
}
}
/**
* The charset recognizer for EUC-KR. A singleton instance of this class
* is created and kept by the public CharsetDetector class
*/
static class CharsetRecog_euc_kr extends CharsetRecog_euc {
static int[] commonChars =
// TODO: This set of data comes from the character frequency-
// of-occurrence analysis tool. The data needs to be moved
// into a resource and loaded from there.
{0xb0a1, 0xb0b3, 0xb0c5, 0xb0cd, 0xb0d4, 0xb0e6, 0xb0ed, 0xb0f8, 0xb0fa, 0xb0fc,
0xb1b8, 0xb1b9, 0xb1c7, 0xb1d7, 0xb1e2, 0xb3aa, 0xb3bb, 0xb4c2, 0xb4cf, 0xb4d9,
0xb4eb, 0xb5a5, 0xb5b5, 0xb5bf, 0xb5c7, 0xb5e9, 0xb6f3, 0xb7af, 0xb7c2, 0xb7ce,
0xb8a6, 0xb8ae, 0xb8b6, 0xb8b8, 0xb8bb, 0xb8e9, 0xb9ab, 0xb9ae, 0xb9cc, 0xb9ce,
0xb9fd, 0xbab8, 0xbace, 0xbad0, 0xbaf1, 0xbbe7, 0xbbf3, 0xbbfd, 0xbcad, 0xbcba,
0xbcd2, 0xbcf6, 0xbdba, 0xbdc0, 0xbdc3, 0xbdc5, 0xbec6, 0xbec8, 0xbedf, 0xbeee,
0xbef8, 0xbefa, 0xbfa1, 0xbfa9, 0xbfc0, 0xbfe4, 0xbfeb, 0xbfec, 0xbff8, 0xc0a7,
0xc0af, 0xc0b8, 0xc0ba, 0xc0bb, 0xc0bd, 0xc0c7, 0xc0cc, 0xc0ce, 0xc0cf, 0xc0d6,
0xc0da, 0xc0e5, 0xc0fb, 0xc0fc, 0xc1a4, 0xc1a6, 0xc1b6, 0xc1d6, 0xc1df, 0xc1f6,
0xc1f8, 0xc4a1, 0xc5cd, 0xc6ae, 0xc7cf, 0xc7d1, 0xc7d2, 0xc7d8, 0xc7e5, 0xc8ad};
String getName() {
return "EUC-KR";
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det, commonChars);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
public String getLanguage() {
return "ko";
}
}
}
/**
* GB-18030 recognizer. Uses simplified Chinese statistics.
*/
static class CharsetRecog_gb_18030 extends CharsetRecog_mbcs {
/*
* (non-Javadoc)
* Get the next character value for EUC based encodings.
* Character "value" is simply the raw bytes that make up the character
* packed into an int.
*/
boolean nextChar(iteratedChar it, CharsetDetector det) {
it.index = it.nextIndex;
it.error = false;
int firstByte = 0;
int secondByte = 0;
int thirdByte = 0;
int fourthByte = 0;
buildChar:
{
firstByte = it.charValue = it.nextByte(det);
if (firstByte < 0) {
// Ran off the end of the input data
it.done = true;
break buildChar;
}
if (firstByte <= 0x80) {
// single byte char
break buildChar;
}
secondByte = it.nextByte(det);
it.charValue = (it.charValue << 8) | secondByte;
if (firstByte >= 0x81 && firstByte <= 0xFE) {
// Two byte Char
if ((secondByte >= 0x40 && secondByte <= 0x7E) || (secondByte >= 0x80 && secondByte <= 0xFE)) {
break buildChar;
}
// Four byte char
if (secondByte >= 0x30 && secondByte <= 0x39) {
thirdByte = it.nextByte(det);
if (thirdByte >= 0x81 && thirdByte <= 0xFE) {
fourthByte = it.nextByte(det);
if (fourthByte >= 0x30 && fourthByte <= 0x39) {
it.charValue = (it.charValue << 16) | (thirdByte << 8) | fourthByte;
break buildChar;
}
}
}
it.error = true;
break buildChar;
}
}
return (it.done == false);
}
static int[] commonChars =
// TODO: This set of data comes from the character frequency-
// of-occurrence analysis tool. The data needs to be moved
// into a resource and loaded from there.
{0xa1a1, 0xa1a2, 0xa1a3, 0xa1a4, 0xa1b0, 0xa1b1, 0xa1f1, 0xa1f3, 0xa3a1, 0xa3ac,
0xa3ba, 0xb1a8, 0xb1b8, 0xb1be, 0xb2bb, 0xb3c9, 0xb3f6, 0xb4f3, 0xb5bd, 0xb5c4,
0xb5e3, 0xb6af, 0xb6d4, 0xb6e0, 0xb7a2, 0xb7a8, 0xb7bd, 0xb7d6, 0xb7dd, 0xb8b4,
0xb8df, 0xb8f6, 0xb9ab, 0xb9c9, 0xb9d8, 0xb9fa, 0xb9fd, 0xbacd, 0xbba7, 0xbbd6,
0xbbe1, 0xbbfa, 0xbcbc, 0xbcdb, 0xbcfe, 0xbdcc, 0xbecd, 0xbedd, 0xbfb4, 0xbfc6,
0xbfc9, 0xc0b4, 0xc0ed, 0xc1cb, 0xc2db, 0xc3c7, 0xc4dc, 0xc4ea, 0xc5cc, 0xc6f7,
0xc7f8, 0xc8ab, 0xc8cb, 0xc8d5, 0xc8e7, 0xc9cf, 0xc9fa, 0xcab1, 0xcab5, 0xcac7,
0xcad0, 0xcad6, 0xcaf5, 0xcafd, 0xccec, 0xcdf8, 0xceaa, 0xcec4, 0xced2, 0xcee5,
0xcfb5, 0xcfc2, 0xcfd6, 0xd0c2, 0xd0c5, 0xd0d0, 0xd0d4, 0xd1a7, 0xd2aa, 0xd2b2,
0xd2b5, 0xd2bb, 0xd2d4, 0xd3c3, 0xd3d0, 0xd3fd, 0xd4c2, 0xd4da, 0xd5e2, 0xd6d0};
String getName() {
return "GB18030";
}
CharsetMatch match(CharsetDetector det) {
int confidence = match(det, commonChars);
return confidence == 0 ? null : new CharsetMatch(det, this, confidence);
}
public String getLanguage() {
return "zh";
}
}
}
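
When frequency statistics are available, the confidence computed above is a log-scaled score; a small Kotlin illustration of that arithmetic (the example numbers are made up).

import kotlin.math.ln
import kotlin.math.min

// Confidence branch used when a common-character table exists:
// scaled so that finding roughly doubleByteCharCount/4 common chars approaches 100.
fun mbcsConfidence(doubleByteCharCount: Int, commonCharCount: Int): Int {
    val maxVal = ln(doubleByteCharCount / 4.0)
    val scaleFactor = 90.0 / maxVal
    val confidence = (ln(commonCharCount + 1.0) * scaleFactor + 10).toInt()
    return min(confidence, 100)
}

// Example: 400 double-byte chars, 60 of them in the common-char table:
// maxVal = ln(100) ≈ 4.6, scaleFactor ≈ 19.5, confidence ≈ ln(61) * 19.5 + 10 ≈ 90.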

@@ -0,0 +1,53 @@
/* GENERATED SOURCE. DO NOT MODIFY. */
/**
* ******************************************************************************
* Copyright (C) 2005-2012, International Business Machines Corporation and *
* others. All Rights Reserved. *
* ******************************************************************************
*/
package io.legado.app.utils.icu4j;
/**
* Abstract class for recognizing a single charset.
* Part of the implementation of ICU's CharsetDetector.
* <p>
* Each specific charset that can be recognized will have an instance
* of some subclass of this class. All interaction between the overall
* CharsetDetector and the stuff specific to an individual charset happens
* via the interface provided here.
* <p>
* Instances of CharsetRecognizer DO NOT have or maintain
* state pertaining to a specific match or detect operation.
* They WILL be shared by multiple instances of CharsetDetector.
* They encapsulate const charset-specific information.
*/
abstract class CharsetRecognizer {
/**
* Get the IANA name of this charset.
*
* @return the charset name.
*/
abstract String getName();
/**
* Get the ISO language code for this charset.
*
* @return the language code, or <code>null</code> if the language cannot be determined.
*/
public String getLanguage() {
return null;
}
/**
* Test the match of this charset with the input text data
* which is obtained via the CharsetDetector object.
*
* @param det The CharsetDetector, which contains the input text
* to be checked for being in this charset.
* @return A CharsetMatch object containing details of match
* with this charset, or null if there was no match.
*/
abstract CharsetMatch match(CharsetDetector det);
}
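
The recognizer set is managed by CharsetDetector rather than by individual recognizers; a hedged Kotlin sketch of narrowing the active set via the ICU-internal, deprecated setDetectableCharset API — the charset names kept here are just examples of names returned by getAllDetectableCharsets.

import io.legado.app.utils.icu4j.CharsetDetector

// Build a detector that only considers a CJK-oriented subset of recognizers.
fun cjkOnlyDetector(): CharsetDetector {
    val keep = setOf("UTF-8", "GB18030", "Big5", "Shift_JIS", "EUC-JP", "EUC-KR")
    val detector = CharsetDetector()
    for (name in CharsetDetector.getAllDetectableCharsets()) {
        detector.setDetectableCharset(name, name in keep)
    }
    return detector
}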