Change Elasticsearch tokenizer to support Chinese full-text search

wuyingren 2020-04-30 01:22:11 +08:00
parent e3750a503e
commit 96b4da8b39
3 changed files with 3 additions and 3 deletions
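
The ik_max_word tokenizer is not bundled with Elasticsearch; it comes from the elasticsearch-analysis-ik plugin, which has to be installed on every node before these settings can be applied. The sketch below is not part of the commit: it assumes that plugin is present and uses the elasticsearch-ruby client (already pulled in via Chewy) to confirm that ik_max_word segments Chinese text; the ES_HOST default and the sample string are illustrative only.

require 'elasticsearch'

client = Elasticsearch::Client.new(url: ENV.fetch('ES_HOST', 'http://localhost:9200'))

# POST /_analyze with an explicit tokenizer; no index is required.
response = client.indices.analyze(
  body: {
    tokenizer: 'ik_max_word',
    text: '中文全文搜索'
  }
)

# Print the extracted terms; ik_max_word emits fine-grained, overlapping
# segments, which is what lets partial Chinese queries match.
puts response['tokens'].map { |t| t['token'] }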

@@ -4,7 +4,7 @@ class AccountsIndex < Chewy::Index
   settings index: { refresh_interval: '5m' }, analysis: {
     analyzer: {
       content: {
-        tokenizer: 'whitespace',
+        tokenizer: 'ik_max_word',
         filter: %w(lowercase asciifolding cjk_width),
       },

@@ -18,7 +18,7 @@ class StatusesIndex < Chewy::Index
     },
     analyzer: {
       content: {
-        tokenizer: 'uax_url_email',
+        tokenizer: 'ik_max_word',
         filter: %w(
           english_possessive_stemmer
           lowercase

@@ -4,7 +4,7 @@ class TagsIndex < Chewy::Index
   settings index: { refresh_interval: '15m' }, analysis: {
     analyzer: {
       content: {
-        tokenizer: 'keyword',
+        tokenizer: 'ik_max_word',
         filter: %w(lowercase asciifolding cjk_width),
       },
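
Changing a tokenizer only affects documents indexed after the new settings are applied, so the existing accounts, statuses, and tags indices have to be rebuilt. A minimal sketch, run from a Rails console on the instance and assuming the standard Chewy setup these files belong to; `reset!` recreates each index with its current analysis settings and re-imports the corresponding records. On larger instances the same rebuild is normally driven by Mastodon's `tootctl search deploy` task.

# Not part of the commit: rebuild the three affected indices so existing
# documents are re-tokenized with ik_max_word.
[AccountsIndex, StatusesIndex, TagsIndex].each(&:reset!)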