/*------------------------------------------------------------------------------------------------*/
/* UNICENS XML Parser                                                                             */
/* Copyright 2017, Microchip Technology Inc. and its subsidiaries.                                */
/*                                                                                                */
/* Redistribution and use in source and binary forms, with or without                             */
/* modification, are permitted provided that the following conditions are met:                    */
/*                                                                                                */
/* 1. Redistributions of source code must retain the above copyright notice, this                 */
/*    list of conditions and the following disclaimer.                                            */
/*                                                                                                */
/* 2. Redistributions in binary form must reproduce the above copyright notice,                   */
/*    this list of conditions and the following disclaimer in the documentation                   */
/*    and/or other materials provided with the distribution.                                      */
/*                                                                                                */
/* 3. Neither the name of the copyright holder nor the names of its                               */
/*    contributors may be used to endorse or promote products derived from                        */
/*    this software without specific prior written permission.                                    */
/*                                                                                                */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"                    */
/* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE                      */
/* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE                 */
/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE                   */
/* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL                     */
/* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR                     */
/* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER                     */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,                  */
/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE                  */
/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.                           */
/*------------------------------------------------------------------------------------------------*/

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "UcsXml.h"
#include "UcsXml_Private.h"

static const char* USB_PHY_STANDARD =       "Standard";
static const char* USB_PHY_HSIC =           "HSIC";

static const char* CLOCK_8FS =              "8Fs";
static const char* CLOCK_16FS =             "16Fs";
static const char* CLOCK_32FS =             "32Fs";
static const char* CLOCK_64FS =             "64Fs";
static const char* CLOCK_128FS =            "128Fs";
static const char* CLOCK_256FS =            "256Fs";
static const char* CLOCK_512FS =            "512Fs";
static const char* CLOCK_1024FS =           "1024Fs";
static const char* CLOCK_2048FS =           "2048Fs";
static const char* CLOCK_3072FS =           "3072Fs";
static const char* CLOCK_4096FS =           "4096Fs";
static const char* CLOCK_6144FS =           "6144Fs";
static const char* CLOCK_8192FS =           "8192Fs";
static const char* CLOCK_WILDCARD =         "Wildcard";

static const char* STRM_ALIGN_L16 =         "Left16Bit";
static const char* STRM_ALIGN_L24 =         "Left24Bit";
static const char* STRM_ALIGN_R16 =         "Right16Bit";
static const char* STRM_ALIGN_R24 =         "Right24Bit";
static const char* STRM_ALIGN_SEQUENTIAL =  "Seq";

static const char* I2S_PIN_SRXA0 =          "SRXA0";
static const char* I2S_PIN_SRXA1 =          "SRXA1";
static const char* I2S_PIN_SRXB0 =          "SRXB0";
static const char* I2S_PIN_SRXB1 =          "SRXB1";

static const char* MUTE_OFF =               "NoMuting";
static const char* MUTE_SIGNAL =            "MuteSignal";

/* Reports a parameter error through the application callback and aborts the calling
   parser function by returning false. */
#define ASSERT_FALSE(func, par) { UcsXml_CB_OnError("Parameter error in attribute=%s value=%s, file=%s, line=%d", 4, func, par, __FILE__, __LINE__); return false; }
/* Rejects NULL pointers; #PTR stringizes the checked expression so the error message
   names the offending parameter instead of passing a NULL pointer to a %s format. */
#define CHECK_POINTER(PTR) if (NULL == PTR) { ASSERT_FALSE(#PTR, "NULL pointer"); }

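/* Converts a numeric attribute string to an integer; base 0 lets strtol() accept
   decimal, hexadecimal (0x prefix) and octal notation. */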
static int32_t Str2Int(const char *val)
{
    return strtol(val, NULL, 0);
}

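/* Allocates a zero-initialized block of (nElem * elemSize) bytes and records it in
   the given object list so that FreeObjList() can release it later. Returns NULL on
   invalid arguments or allocation failure. */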
void *MCalloc(struct UcsXmlObjectList *list, uint32_t nElem, uint32_t elemSize)
{
    void *obj;
    struct UcsXmlObjectList *tail = list;
    if (NULL == list || 0 == nElem || 0 == elemSize) return NULL;

    obj = calloc(nElem, elemSize);
    if (NULL == obj)
    {
        assert(false);
        return NULL;
    }
    if (NULL == list->obj)
    {
        list->obj = obj;
        return obj;
    }
    while(tail->next) tail = tail->next;
    tail->next = calloc(1, sizeof(struct UcsXmlObjectList));
    if (NULL == tail->next)
    {
        assert(false);
        free(obj);
        return NULL;
    }
    tail->next->obj = obj;
    return obj;
}

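/* Frees every object tracked by the list. The root list element itself is owned by
   the caller and is therefore not freed. */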
void FreeObjList(struct UcsXmlObjectList *cur)
{
    struct UcsXmlObjectList *root = cur;
    while(cur)
    {
        struct UcsXmlObjectList *next = cur->next;
        assert(NULL != cur->obj);
        if (cur->obj)
            free(cur->obj);
        if (cur != root)
            free(cur);
        cur = next;
    }
}

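/* Creates a MOST socket resource from the parsed attributes: bandwidth, direction
   (source = input) and data type. */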
bool GetMostSocket(Ucs_Xrm_MostSocket_t **mostSoc, struct MostSocketParameters *param)
{
    Ucs_Xrm_MostSocket_t *soc = NULL;
    CHECK_POINTER(mostSoc);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    soc = MCalloc(param->list, 1, sizeof(Ucs_Xrm_MostSocket_t));
    CHECK_POINTER(soc);
    *mostSoc = soc;
    soc->resource_type = UCS_XRM_RC_TYPE_MOST_SOCKET;
    soc->most_port_handle = 0x0D00;
    soc->bandwidth = param->bandwidth;
    soc->direction = param->isSource ? UCS_SOCKET_DIR_INPUT : UCS_SOCKET_DIR_OUTPUT;
    switch(param->dataType)
    {
    case SYNC_DATA:
        soc->data_type = UCS_MOST_SCKT_SYNC_DATA;
        break;
    case AV_PACKETIZED:
        soc->data_type = UCS_MOST_SCKT_AV_PACKETIZED;
        break;
    case QOS_IP:
        soc->data_type = UCS_MOST_SCKT_QOS_IP;
        break;
    case DISC_FRAME_PHASE:
        soc->data_type = UCS_MOST_SCKT_DISC_FRAME_PHASE;
        break;
    default:
        ASSERT_FALSE("GetMostSocket->dataType", "");
    }
    return true;
}

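/* Creates a USB port resource; numeric attributes are parsed with Str2Int() and the
   physical layer string ("Standard" / "HSIC") is mapped to the UNICENS enum. */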
bool GetUsbPort(Ucs_Xrm_UsbPort_t **usbPort, struct UsbPortParameters *param)
{
    Ucs_Xrm_UsbPort_t *port = NULL;
    CHECK_POINTER(usbPort);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->deviceInterfaces);
    CHECK_POINTER(param->streamInCount);
    CHECK_POINTER(param->streamOutCount);
    CHECK_POINTER(param->physicalLayer);
    port = MCalloc(param->list, 1, sizeof(Ucs_Xrm_UsbPort_t));
    CHECK_POINTER(port);
    *usbPort = port;
    port->resource_type = UCS_XRM_RC_TYPE_USB_PORT;
    port->index = 0;
    port->devices_interfaces = (uint16_t)Str2Int(param->deviceInterfaces);
    port->streaming_if_ep_in_count = (uint8_t)Str2Int(param->streamInCount);
    port->streaming_if_ep_out_count = (uint8_t)Str2Int(param->streamOutCount);
    if (0 == strcmp(USB_PHY_STANDARD, param->physicalLayer))
        port->physical_layer = UCS_USB_PHY_LAYER_STANDARD;
    else if (0 == strcmp(USB_PHY_HSIC, param->physicalLayer))
        port->physical_layer = UCS_USB_PHY_LAYER_HSCI;
    else ASSERT_FALSE("GetUsbPort->physical_layer", param->physicalLayer);
    return true;
}

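/* Creates a placeholder for the default-created USB port instead of a fully
   configured port resource. */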
bool GetUsbPortDefaultCreated(Ucs_Xrm_ResObject_t **usbPort, struct UcsXmlObjectList *list)
{
    Ucs_Xrm_DefaultCreatedPort_t *p;
    CHECK_POINTER(usbPort);
    CHECK_POINTER(list);
    p = MCalloc(list, 1, sizeof(Ucs_Xrm_DefaultCreatedPort_t));
    CHECK_POINTER(p);
    p->resource_type = UCS_XRM_RC_TYPE_DC_PORT;
    p->port_type = UCS_XRM_PORT_TYPE_USB;
    p->index = 0;
    *usbPort = (Ucs_Xrm_ResObject_t *)p;
    return true;
}

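/* Creates a USB socket resource (direction, data type, endpoint address, frames per
   transfer) bound to the given USB port object. */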
bool GetUsbSocket(Ucs_Xrm_UsbSocket_t **usbSoc, struct UsbSocketParameters *param)
{
    Ucs_Xrm_UsbSocket_t *soc = NULL;
    CHECK_POINTER(usbSoc);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->endpointAddress);
    CHECK_POINTER(param->framesPerTrans);
    CHECK_POINTER(param->usbPort);
    soc = MCalloc(param->list, 1, sizeof(Ucs_Xrm_UsbSocket_t));
    CHECK_POINTER(soc);
    *usbSoc = soc;
    soc->resource_type = UCS_XRM_RC_TYPE_USB_SOCKET;
    soc->direction = param->isSource ? UCS_SOCKET_DIR_INPUT : UCS_SOCKET_DIR_OUTPUT;
    switch(param->dataType)
    {
    case SYNC_DATA:
        soc->data_type = UCS_USB_SCKT_SYNC_DATA;
        break;
    case AV_PACKETIZED:
        soc->data_type = UCS_USB_SCKT_AV_PACKETIZED;
        break;
    case IPC_PACKET:
        soc->data_type = UCS_USB_SCKT_IPC_PACKET;
        break;
    default:
        ASSERT_FALSE("GetUsbSocket->dataType", "");
    }
    soc->end_point_addr = (uint8_t)Str2Int(param->endpointAddress);
    soc->frames_per_transfer = (uint16_t)Str2Int(param->framesPerTrans);
    soc->usb_port_obj_ptr = param->usbPort;
    return true;
}

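/* Creates a MediaLB port resource and maps the clock configuration string
   ("256Fs" .. "8192Fs") to the UNICENS enum. */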
bool GetMlbPort(Ucs_Xrm_MlbPort_t **mlbPort, struct MlbPortParameters *param)
{
    Ucs_Xrm_MlbPort_t *port = NULL;
    CHECK_POINTER(mlbPort);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->clockConfig);
    port = MCalloc(param->list, 1, sizeof(Ucs_Xrm_MlbPort_t));
    CHECK_POINTER(port);
    *mlbPort = port;
    port->resource_type = UCS_XRM_RC_TYPE_MLB_PORT;
    port->index = 0;
    if (0 == strcmp(param->clockConfig, CLOCK_256FS))
        port->clock_config = UCS_MLB_CLK_CFG_256_FS;
    else if (0 == strcmp(param->clockConfig, CLOCK_512FS))
        port->clock_config = UCS_MLB_CLK_CFG_512_FS;
    else if (0 == strcmp(param->clockConfig, CLOCK_1024FS))
        port->clock_config = UCS_MLB_CLK_CFG_1024_FS;
    else if (0 == strcmp(param->clockConfig, CLOCK_2048FS))
        port->clock_config = UCS_MLB_CLK_CFG_2048_FS;
    else if (0 == strcmp(param->clockConfig, CLOCK_3072FS))
        port->clock_config = UCS_MLB_CLK_CFG_3072_FS;
    else if (0 == strcmp(param->clockConfig, CLOCK_4096FS))
        port->clock_config = UCS_MLB_CLK_CFG_4096_FS;
    else if (0 == strcmp(param->clockConfig, CLOCK_6144FS))
        port->clock_config = UCS_MLB_CLK_CFG_6144_FS;
    else if (0 == strcmp(param->clockConfig, CLOCK_8192FS))
        port->clock_config = UCS_MLB_CLK_CFG_8192_FS;
    else ASSERT_FALSE("GetMlbPort->clockConfig", param->clockConfig);
    return true;
}

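/* Creates a placeholder for the default-created MediaLB port. */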
bool GetMlbPortDefaultCreated(Ucs_Xrm_ResObject_t **mlbPort, struct UcsXmlObjectList *list)
{
    Ucs_Xrm_DefaultCreatedPort_t *p;
    CHECK_POINTER(mlbPort);
    CHECK_POINTER(list);
    p = MCalloc(list, 1, sizeof(Ucs_Xrm_DefaultCreatedPort_t));
    CHECK_POINTER(p);
    p->resource_type = UCS_XRM_RC_TYPE_DC_PORT;
    p->port_type = UCS_XRM_PORT_TYPE_MLB;
    p->index = 0;
    *mlbPort = (Ucs_Xrm_ResObject_t *)p;
    return true;
}

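/* Creates a MediaLB socket resource (direction, bandwidth, data type, channel
   address) bound to the given MediaLB port object. */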
bool GetMlbSocket(Ucs_Xrm_MlbSocket_t **mlbSoc, struct MlbSocketParameters *param)
{
    Ucs_Xrm_MlbSocket_t *soc = NULL;
    CHECK_POINTER(mlbSoc);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->channelAddress);
    CHECK_POINTER(param->mlbPort);
    soc = MCalloc(param->list, 1, sizeof(Ucs_Xrm_MlbSocket_t));
    CHECK_POINTER(soc);
    *mlbSoc = soc;
    soc->resource_type = UCS_XRM_RC_TYPE_MLB_SOCKET;
    soc->direction = param->isSource ? UCS_SOCKET_DIR_INPUT : UCS_SOCKET_DIR_OUTPUT;
    soc->bandwidth = param->bandwidth;
    switch(param->dataType)
    {
    case SYNC_DATA:
        soc->data_type = UCS_MLB_SCKT_SYNC_DATA;
        break;
    case AV_PACKETIZED:
        soc->data_type = UCS_MLB_SCKT_AV_PACKETIZED;
        break;
    case QOS_IP:
        soc->data_type = UCS_MLB_SCKT_QOS_IP;
        break;
    case DISC_FRAME_PHASE:
        soc->data_type = UCS_MLB_SCKT_DISC_FRAME_PHASE;
        break;
    case IPC_PACKET:
        soc->data_type = UCS_MLB_SCKT_IPC_PACKET;
        break;
    default:
        ASSERT_FALSE("GetMlbSocket->dataType", "");
    }
    soc->channel_address = (uint16_t)Str2Int(param->channelAddress);
    soc->mlb_port_obj_ptr = param->mlbPort;
    return true;
}

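/* Creates a streaming port resource. The clock configuration is only evaluated for
   port index 0; any other index uses the wildcard setting. The data alignment string
   is mapped to the UNICENS enum. */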
bool GetStrmPort(Ucs_Xrm_StrmPort_t **strmPort, struct StrmPortParameters *param)
{
    Ucs_Xrm_StrmPort_t *port = NULL;
    CHECK_POINTER(strmPort);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->clockConfig);
    CHECK_POINTER(param->dataAlignment);
    port = MCalloc(param->list, 1, sizeof(Ucs_Xrm_StrmPort_t));
    CHECK_POINTER(port);
    *strmPort = port;
    port->resource_type = UCS_XRM_RC_TYPE_STRM_PORT;
    port->index = param->index;
    if (0 == port->index)
    {
        if (0 == strcmp(param->clockConfig, CLOCK_8FS))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_8FS;
        else if (0 == strcmp(param->clockConfig, CLOCK_16FS))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_16FS;
        else if (0 == strcmp(param->clockConfig, CLOCK_32FS))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_32FS;
        else if (0 == strcmp(param->clockConfig, CLOCK_64FS))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_64FS;
        else if (0 == strcmp(param->clockConfig, CLOCK_128FS))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_128FS;
        else if (0 == strcmp(param->clockConfig, CLOCK_256FS))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_256FS;
        else if (0 == strcmp(param->clockConfig, CLOCK_512FS))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_512FS;
        else if (0 == strcmp(param->clockConfig, CLOCK_WILDCARD))
            port->clock_config = UCS_STREAM_PORT_CLK_CFG_WILD;
        else ASSERT_FALSE("GetStrmPort->clockConfig", param->clockConfig);
    } else {
        port->clock_config = UCS_STREAM_PORT_CLK_CFG_WILD;
    }

    if (0 == strcmp(param->dataAlignment, STRM_ALIGN_L16))
        port->data_alignment = UCS_STREAM_PORT_ALGN_LEFT16BIT;
    else if (0 == strcmp(param->dataAlignment, STRM_ALIGN_L24))
        port->data_alignment = UCS_STREAM_PORT_ALGN_LEFT24BIT;
    else if (0 == strcmp(param->dataAlignment, STRM_ALIGN_R16))
        port->data_alignment = UCS_STREAM_PORT_ALGN_RIGHT16BIT;
    else if (0 == strcmp(param->dataAlignment, STRM_ALIGN_R24))
        port->data_alignment = UCS_STREAM_PORT_ALGN_RIGHT24BIT;
    else if (0 == strcmp(param->dataAlignment, STRM_ALIGN_SEQUENTIAL))
        port->data_alignment = UCS_STREAM_PORT_ALGN_SEQ;
    else ASSERT_FALSE("GetStrmPort->dataAlignment", param->dataAlignment);
    return true;
}

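/* Creates a streaming socket resource; the stream pin (SRXA0/SRXA1/SRXB0/SRXB1)
   selects both the pin ID and whether stream port A or B is referenced. */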
bool GetStrmSocket(Ucs_Xrm_StrmSocket_t **strmSoc, struct StrmSocketParameters *param)
{
    Ucs_Xrm_StrmSocket_t *soc = NULL;
    CHECK_POINTER(strmSoc);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->streamPin);
    CHECK_POINTER(param->streamPortA);
    CHECK_POINTER(param->streamPortB);
    soc = MCalloc(param->list, 1, sizeof(Ucs_Xrm_StrmSocket_t));
    CHECK_POINTER(soc);
    *strmSoc = soc;
    soc->resource_type = UCS_XRM_RC_TYPE_STRM_SOCKET;
    soc->direction = param->isSource ? UCS_SOCKET_DIR_INPUT : UCS_SOCKET_DIR_OUTPUT;
    switch(param->dataType)
    {
    case SYNC_DATA:
        soc->data_type = UCS_STREAM_PORT_SCKT_SYNC_DATA;
        break;
    default:
        ASSERT_FALSE("GetStrmSocket->dataType", "");
    }
    soc->bandwidth = param->bandwidth;
    if (0 == strcmp(param->streamPin, I2S_PIN_SRXA0))
    {
        soc->stream_pin_id = UCS_STREAM_PORT_PIN_ID_SRXA0;
        soc->stream_port_obj_ptr = param->streamPortA;
        return true;
    }
    else if (0 == strcmp(param->streamPin, I2S_PIN_SRXA1))
    {
        soc->stream_pin_id = UCS_STREAM_PORT_PIN_ID_SRXA1;
        soc->stream_port_obj_ptr = param->streamPortA;
        return true;
    }
    else if (0 == strcmp(param->streamPin, I2S_PIN_SRXB0))
    {
        soc->stream_pin_id = UCS_STREAM_PORT_PIN_ID_SRXB0;
        soc->stream_port_obj_ptr = param->streamPortB;
        return true;
    }
    else if (0 == strcmp(param->streamPin, I2S_PIN_SRXB1))
    {
        soc->stream_pin_id = UCS_STREAM_PORT_PIN_ID_SRXB1;
        soc->stream_port_obj_ptr = param->streamPortB;
        return true;
    }
    else ASSERT_FALSE("GetStrmSocket->streamPin", param->streamPin);
    return true;
}

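/* Creates a splitter resource for the given input socket with the configured
   bytes per frame. */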
bool GetSplitter(Ucs_Xrm_Splitter_t **splitter, struct SplitterParameters *param)
{
    Ucs_Xrm_Splitter_t *split = NULL;
    CHECK_POINTER(splitter);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    split = MCalloc(param->list, 1, sizeof(Ucs_Xrm_Splitter_t));
    CHECK_POINTER(split);
    *splitter = split;
    split->most_port_handle = 0x0D00;
    split->resource_type = UCS_XRM_RC_TYPE_SPLITTER;
    split->bytes_per_frame = param->bytesPerFrame;
    split->socket_in_obj_ptr = param->inSoc;
    return true;
}

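/* Creates a combiner resource for the given output socket with the configured
   bytes per frame. */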
bool GetCombiner(Ucs_Xrm_Combiner_t **combiner, struct CombinerParameters *param)
{
    Ucs_Xrm_Combiner_t *comb = NULL;
    CHECK_POINTER(combiner);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    comb = MCalloc(param->list, 1, sizeof(Ucs_Xrm_Combiner_t));
    CHECK_POINTER(comb);
    *combiner = comb;
    comb->most_port_handle = 0x0D00;
    comb->resource_type = UCS_XRM_RC_TYPE_COMBINER;
    comb->bytes_per_frame = param->bytesPerFrame;
    comb->port_socket_obj_ptr = param->outSoc;
    return true;
}

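/* Creates a synchronous connection between the input and output socket, including
   the mute mode and an optional offset (defaults to 0). */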
bool GetSyncCon(Ucs_Xrm_SyncCon_t **syncCon, struct SyncConParameters *param)
{
    Ucs_Xrm_SyncCon_t *con = NULL;
    CHECK_POINTER(syncCon);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->muteMode);
    CHECK_POINTER(param->inSoc);
    CHECK_POINTER(param->outSoc);
    con = MCalloc(param->list, 1, sizeof(Ucs_Xrm_SyncCon_t));
    CHECK_POINTER(con);
    *syncCon = con;
    con->resource_type = UCS_XRM_RC_TYPE_SYNC_CON;
    if (0 == strcmp(param->muteMode, MUTE_OFF))
        con->mute_mode = UCS_SYNC_MUTE_MODE_NO_MUTING;
    else if (0 == strcmp(param->muteMode, MUTE_SIGNAL))
        con->mute_mode = UCS_SYNC_MUTE_MODE_MUTE_SIGNAL;
    else ASSERT_FALSE("GetSyncCon->mute_mode", param->muteMode);
    if (param->optional_offset)
        con->offset = (uint16_t)Str2Int(param->optional_offset);
    else
        con->offset = 0;
    con->socket_in_obj_ptr = param->inSoc;
    con->socket_out_obj_ptr = param->outSoc;
    return true;
}

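/* Creates an A/V packetized connection between the input and output socket; the
   isochronous packet size attribute is optional and defaults to 188 bytes. */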
bool GetAvpCon(Ucs_Xrm_AvpCon_t **avpCon, struct AvpConParameters *param)
{
    Ucs_Xrm_AvpCon_t *con = NULL;
    CHECK_POINTER(avpCon);
    CHECK_POINTER(param);
    CHECK_POINTER(param->list);
    CHECK_POINTER(param->inSoc);
    CHECK_POINTER(param->outSoc);
    con = MCalloc(param->list, 1, sizeof(Ucs_Xrm_AvpCon_t));
    CHECK_POINTER(con);
    *avpCon = con;
    con->resource_type = UCS_XRM_RC_TYPE_AVP_CON;
    con->socket_in_obj_ptr = param->inSoc;
    con->socket_out_obj_ptr = param->outSoc;
    if (param->optional_isocPacketSize)
    {
        int32_t pSize = Str2Int(param->optional_isocPacketSize);
        switch(pSize)
        {
        case 188:
            con->isoc_packet_size = UCS_ISOC_PCKT_SIZE_188;
            break;
        case 196:
            con->isoc_packet_size = UCS_ISOC_PCKT_SIZE_196;
            break;
        case 206:
            con->isoc_packet_size = UCS_ISOC_PCKT_SIZE_206;
            break;
        default:
            ASSERT_FALSE("GetAvpCon->isoc_packet_size", "");
        }
    } else {
        con->isoc_packet_size = UCS_ISOC_PCKT_SIZE_188;
    }
    return true;
}
n class="nx">fn); }, this); }; /** * Removes a handler function from a specific event. * * @param {String} eventName The name of the event to remove this function from. * @param {Function} fn The function to remove from an event. * @memberOf EventEmitter */ elasticlunr.EventEmitter.prototype.removeListener = function (name, fn) { if (!this.hasHandler(name)) return; var fnIndex = this.events[name].indexOf(fn); if (fnIndex === -1) return; this.events[name].splice(fnIndex, 1); if (this.events[name].length == 0) delete this.events[name]; }; /** * Call all functions that bounded to the given event. * * Additional data can be passed to the event handler as arguments to `emit` * after the event name. * * @param {String} eventName The name of the event to emit. * @memberOf EventEmitter */ elasticlunr.EventEmitter.prototype.emit = function (name) { if (!this.hasHandler(name)) return; var args = Array.prototype.slice.call(arguments, 1); this.events[name].forEach(function (fn) { fn.apply(undefined, args); }, this); }; /** * Checks whether a handler has ever been stored against an event. * * @param {String} eventName The name of the event to check. * @private * @memberOf EventEmitter */ elasticlunr.EventEmitter.prototype.hasHandler = function (name) { return name in this.events; }; /*! * elasticlunr.tokenizer * Copyright (C) 2016 Oliver Nightingale * Copyright (C) 2016 Wei Song */ /** * A function for splitting a string into tokens. * Currently English is supported as default. * Uses `elasticlunr.tokenizer.seperator` to split strings, you could change * the value of this property to set how you want strings are split into tokens. * IMPORTANT: use elasticlunr.tokenizer.seperator carefully, if you are not familiar with * text process, then you'd better not change it. * * @module * @param {String} str The string that you want to tokenize. * @see elasticlunr.tokenizer.seperator * @return {Array} */ elasticlunr.tokenizer = function (str) { if (!arguments.length || str === null || str === undefined) return []; if (Array.isArray(str)) { var arr = str.filter(function(token) { if (token === null || token === undefined) { return false; } return true; }); arr = arr.map(function (t) { return elasticlunr.utils.toString(t).toLowerCase(); }); var out = []; arr.forEach(function(item) { var tokens = item.split(elasticlunr.tokenizer.seperator); out = out.concat(tokens); }, this); return out; } return str.toString().trim().toLowerCase().split(elasticlunr.tokenizer.seperator); }; /** * Default string seperator. */ elasticlunr.tokenizer.defaultSeperator = /[\s\-]+/; /** * The sperator used to split a string into tokens. Override this property to change the behaviour of * `elasticlunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens. * * @static * @see elasticlunr.tokenizer */ elasticlunr.tokenizer.seperator = elasticlunr.tokenizer.defaultSeperator; /** * Set up customized string seperator * * @param {Object} sep The customized seperator that you want to use to tokenize a string. */ elasticlunr.tokenizer.setSeperator = function(sep) { if (sep !== null && sep !== undefined && typeof(sep) === 'object') { elasticlunr.tokenizer.seperator = sep; } } /** * Reset string seperator * */ elasticlunr.tokenizer.resetSeperator = function() { elasticlunr.tokenizer.seperator = elasticlunr.tokenizer.defaultSeperator; } /** * Get string seperator * */ elasticlunr.tokenizer.getSeperator = function() { return elasticlunr.tokenizer.seperator; } /*! 
* elasticlunr.Pipeline * Copyright (C) 2016 Oliver Nightingale * Copyright (C) 2016 Wei Song */ /** * elasticlunr.Pipelines maintain an ordered list of functions to be applied to * both documents tokens and query tokens. * * An instance of elasticlunr.Index will contain a pipeline * with a trimmer, a stop word filter, an English stemmer. Extra * functions can be added before or after either of these functions or these * default functions can be removed. * * When run the pipeline, it will call each function in turn. * * The output of the functions in the pipeline will be passed to the next function * in the pipeline. To exclude a token from entering the index the function * should return undefined, the rest of the pipeline will not be called with * this token. * * For serialisation of pipelines to work, all functions used in an instance of * a pipeline should be registered with elasticlunr.Pipeline. Registered functions can * then be loaded. If trying to load a serialised pipeline that uses functions * that are not registered an error will be thrown. * * If not planning on serialising the pipeline then registering pipeline functions * is not necessary. * * @constructor */ elasticlunr.Pipeline = function () { this._queue = []; }; elasticlunr.Pipeline.registeredFunctions = {}; /** * Register a function in the pipeline. * * Functions that are used in the pipeline should be registered if the pipeline * needs to be serialised, or a serialised pipeline needs to be loaded. * * Registering a function does not add it to a pipeline, functions must still be * added to instances of the pipeline for them to be used when running a pipeline. * * @param {Function} fn The function to register. * @param {String} label The label to register this function with * @memberOf Pipeline */ elasticlunr.Pipeline.registerFunction = function (fn, label) { if (label in elasticlunr.Pipeline.registeredFunctions) { elasticlunr.utils.warn('Overwriting existing registered function: ' + label); } fn.label = label; elasticlunr.Pipeline.registeredFunctions[label] = fn; }; /** * Get a registered function in the pipeline. * * @param {String} label The label of registered function. * @return {Function} * @memberOf Pipeline */ elasticlunr.Pipeline.getRegisteredFunction = function (label) { if ((label in elasticlunr.Pipeline.registeredFunctions) !== true) { return null; } return elasticlunr.Pipeline.registeredFunctions[label]; }; /** * Warns if the function is not registered as a Pipeline function. * * @param {Function} fn The function to check for. * @private * @memberOf Pipeline */ elasticlunr.Pipeline.warnIfFunctionNotRegistered = function (fn) { var isRegistered = fn.label && (fn.label in this.registeredFunctions); if (!isRegistered) { elasticlunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn); } }; /** * Loads a previously serialised pipeline. * * All functions to be loaded must already be registered with elasticlunr.Pipeline. * If any function from the serialised data has not been registered then an * error will be thrown. * * @param {Object} serialised The serialised pipeline to load. 
* @return {elasticlunr.Pipeline} * @memberOf Pipeline */ elasticlunr.Pipeline.load = function (serialised) { var pipeline = new elasticlunr.Pipeline; serialised.forEach(function (fnName) { var fn = elasticlunr.Pipeline.getRegisteredFunction(fnName); if (fn) { pipeline.add(fn); } else { throw new Error('Cannot load un-registered function: ' + fnName); } }); return pipeline; }; /** * Adds new functions to the end of the pipeline. * * Logs a warning if the function has not been registered. * * @param {Function} functions Any number of functions to add to the pipeline. * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.add = function () { var fns = Array.prototype.slice.call(arguments); fns.forEach(function (fn) { elasticlunr.Pipeline.warnIfFunctionNotRegistered(fn); this._queue.push(fn); }, this); }; /** * Adds a single function after a function that already exists in the * pipeline. * * Logs a warning if the function has not been registered. * If existingFn is not found, throw an Exception. * * @param {Function} existingFn A function that already exists in the pipeline. * @param {Function} newFn The new function to add to the pipeline. * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.after = function (existingFn, newFn) { elasticlunr.Pipeline.warnIfFunctionNotRegistered(newFn); var pos = this._queue.indexOf(existingFn); if (pos === -1) { throw new Error('Cannot find existingFn'); } this._queue.splice(pos + 1, 0, newFn); }; /** * Adds a single function before a function that already exists in the * pipeline. * * Logs a warning if the function has not been registered. * If existingFn is not found, throw an Exception. * * @param {Function} existingFn A function that already exists in the pipeline. * @param {Function} newFn The new function to add to the pipeline. * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.before = function (existingFn, newFn) { elasticlunr.Pipeline.warnIfFunctionNotRegistered(newFn); var pos = this._queue.indexOf(existingFn); if (pos === -1) { throw new Error('Cannot find existingFn'); } this._queue.splice(pos, 0, newFn); }; /** * Removes a function from the pipeline. * * @param {Function} fn The function to remove from the pipeline. * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.remove = function (fn) { var pos = this._queue.indexOf(fn); if (pos === -1) { return; } this._queue.splice(pos, 1); }; /** * Runs the current list of functions that registered in the pipeline against the * input tokens. * * @param {Array} tokens The tokens to run through the pipeline. * @return {Array} * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.run = function (tokens) { var out = [], tokenLength = tokens.length, pipelineLength = this._queue.length; for (var i = 0; i < tokenLength; i++) { var token = tokens[i]; for (var j = 0; j < pipelineLength; j++) { token = this._queue[j](token, i, tokens); if (token === void 0 || token === null) break; }; if (token !== void 0 && token !== null) out.push(token); }; return out; }; /** * Resets the pipeline by removing any existing processors. * * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.reset = function () { this._queue = []; }; /** * Get the pipeline if user want to check the pipeline. * * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.get = function () { return this._queue; }; /** * Returns a representation of the pipeline ready for serialisation. * Only serialize pipeline function's name. 
Not storing function, so when * loading the archived JSON index file, corresponding pipeline function is * added by registered function of elasticlunr.Pipeline.registeredFunctions * * Logs a warning if the function has not been registered. * * @return {Array} * @memberOf Pipeline */ elasticlunr.Pipeline.prototype.toJSON = function () { return this._queue.map(function (fn) { elasticlunr.Pipeline.warnIfFunctionNotRegistered(fn); return fn.label; }); }; /*! * elasticlunr.Index * Copyright (C) 2016 Oliver Nightingale * Copyright (C) 2016 Wei Song */ /** * elasticlunr.Index is object that manages a search index. It contains the indexes * and stores all the tokens and document lookups. It also provides the main * user facing API for the library. * * @constructor */ elasticlunr.Index = function () { this._fields = []; this._ref = 'id'; this.pipeline = new elasticlunr.Pipeline; this.documentStore = new elasticlunr.DocumentStore; this.index = {}; this.eventEmitter = new elasticlunr.EventEmitter; this._idfCache = {}; this.on('add', 'remove', 'update', (function () { this._idfCache = {}; }).bind(this)); }; /** * Bind a handler to events being emitted by the index. * * The handler can be bound to many events at the same time. * * @param {String} [eventName] The name(s) of events to bind the function to. * @param {Function} fn The serialised set to load. * @memberOf Index */ elasticlunr.Index.prototype.on = function () { var args = Array.prototype.slice.call(arguments); return this.eventEmitter.addListener.apply(this.eventEmitter, args); }; /** * Removes a handler from an event being emitted by the index. * * @param {String} eventName The name of events to remove the function from. * @param {Function} fn The serialised set to load. * @memberOf Index */ elasticlunr.Index.prototype.off = function (name, fn) { return this.eventEmitter.removeListener(name, fn); }; /** * Loads a previously serialised index. * * Issues a warning if the index being imported was serialised * by a different version of elasticlunr. * * @param {Object} serialisedData The serialised set to load. * @return {elasticlunr.Index} * @memberOf Index */ elasticlunr.Index.load = function (serialisedData) { if (serialisedData.version !== elasticlunr.version) { elasticlunr.utils.warn('version mismatch: current ' + elasticlunr.version + ' importing ' + serialisedData.version); } var idx = new this; idx._fields = serialisedData.fields; idx._ref = serialisedData.ref; idx.documentStore = elasticlunr.DocumentStore.load(serialisedData.documentStore); idx.pipeline = elasticlunr.Pipeline.load(serialisedData.pipeline); idx.index = {}; for (var field in serialisedData.index) { idx.index[field] = elasticlunr.InvertedIndex.load(serialisedData.index[field]); } return idx; }; /** * Adds a field to the list of fields that will be searchable within documents in the index. * * Remember that inner index is build based on field, which means each field has one inverted index. * * Fields should be added before any documents are added to the index, fields * that are added after documents are added to the index will only apply to new * documents added to the index. 
* * @param {String} fieldName The name of the field within the document that should be indexed * @return {elasticlunr.Index} * @memberOf Index */ elasticlunr.Index.prototype.addField = function (fieldName) { this._fields.push(fieldName); this.index[fieldName] = new elasticlunr.InvertedIndex; return this; }; /** * Sets the property used to uniquely identify documents added to the index, * by default this property is 'id'. * * This should only be changed before adding documents to the index, changing * the ref property without resetting the index can lead to unexpected results. * * @param {String} refName The property to use to uniquely identify the * documents in the index. * @param {Boolean} emitEvent Whether to emit add events, defaults to true * @return {elasticlunr.Index} * @memberOf Index */ elasticlunr.Index.prototype.setRef = function (refName) { this._ref = refName; return this; }; /** * * Set if the JSON format original documents are save into elasticlunr.DocumentStore * * Defaultly save all the original JSON documents. * * @param {Boolean} save Whether to save the original JSON documents. * @return {elasticlunr.Index} * @memberOf Index */ elasticlunr.Index.prototype.saveDocument = function (save) { this.documentStore = new elasticlunr.DocumentStore(save); return this; }; /** * Add a JSON format document to the index. * * This is the way new documents enter the index, this function will run the * fields from the document through the index's pipeline and then add it to * the index, it will then show up in search results. * * An 'add' event is emitted with the document that has been added and the index * the document has been added to. This event can be silenced by passing false * as the second argument to add. * * @param {Object} doc The JSON format document to add to the index. * @param {Boolean} emitEvent Whether or not to emit events, default true. * @memberOf Index */ elasticlunr.Index.prototype.addDoc = function (doc, emitEvent) { if (!doc) return; var emitEvent = emitEvent === undefined ? true : emitEvent; var docRef = doc[this._ref]; this.documentStore.addDoc(docRef, doc); this._fields.forEach(function (field) { var fieldTokens = this.pipeline.run(elasticlunr.tokenizer(doc[field])); this.documentStore.addFieldLength(docRef, field, fieldTokens.length); var tokenCount = {}; fieldTokens.forEach(function (token) { if (token in tokenCount) tokenCount[token] += 1; else tokenCount[token] = 1; }, this); for (var token in tokenCount) { var termFrequency = tokenCount[token]; termFrequency = Math.sqrt(termFrequency); this.index[field].addToken(token, { ref: docRef, tf: termFrequency }); } }, this); if (emitEvent) this.eventEmitter.emit('add', doc, this); }; /** * Removes a document from the index by doc ref. * * To make sure documents no longer show up in search results they can be * removed from the index using this method. * * A 'remove' event is emitted with the document that has been removed and the index * the document has been removed from. This event can be silenced by passing false * as the second argument to remove. * * If user setting DocumentStore not storing the documents, then remove doc by docRef is not allowed. * * @param {String|Integer} docRef The document ref to remove from the index. 
* @param {Boolean} emitEvent Whether to emit remove events, defaults to true * @memberOf Index */ elasticlunr.Index.prototype.removeDocByRef = function (docRef, emitEvent) { if (!docRef) return; if (this.documentStore.isDocStored() === false) { return; } if (!this.documentStore.hasDoc(docRef)) return; var doc = this.documentStore.getDoc(docRef); this.removeDoc(doc, false); }; /** * Removes a document from the index. * This remove operation could work even the original doc is not store in the DocumentStore. * * To make sure documents no longer show up in search results they can be * removed from the index using this method. * * A 'remove' event is emitted with the document that has been removed and the index * the document has been removed from. This event can be silenced by passing false * as the second argument to remove. * * * @param {Object} doc The document ref to remove from the index. * @param {Boolean} emitEvent Whether to emit remove events, defaults to true * @memberOf Index */ elasticlunr.Index.prototype.removeDoc = function (doc, emitEvent) { if (!doc) return; var emitEvent = emitEvent === undefined ? true : emitEvent; var docRef = doc[this._ref]; if (!this.documentStore.hasDoc(docRef)) return; this.documentStore.removeDoc(docRef); this._fields.forEach(function (field) { var fieldTokens = this.pipeline.run(elasticlunr.tokenizer(doc[field])); fieldTokens.forEach(function (token) { this.index[field].removeToken(token, docRef); }, this); }, this); if (emitEvent) this.eventEmitter.emit('remove', doc, this); }; /** * Updates a document in the index. * * When a document contained within the index gets updated, fields changed, * added or removed, to make sure it correctly matched against search queries, * it should be updated in the index. * * This method is just a wrapper around `remove` and `add` * * An 'update' event is emitted with the document that has been updated and the index. * This event can be silenced by passing false as the second argument to update. Only * an update event will be fired, the 'add' and 'remove' events of the underlying calls * are silenced. * * @param {Object} doc The document to update in the index. * @param {Boolean} emitEvent Whether to emit update events, defaults to true * @see Index.prototype.remove * @see Index.prototype.add * @memberOf Index */ elasticlunr.Index.prototype.updateDoc = function (doc, emitEvent) { var emitEvent = emitEvent === undefined ? true : emitEvent; this.removeDocByRef(doc[this._ref], false); this.addDoc(doc, false); if (emitEvent) this.eventEmitter.emit('update', doc, this); }; /** * Calculates the inverse document frequency for a token within the index of a field. * * @param {String} token The token to calculate the idf of. * @param {String} field The field to compute idf. * @see Index.prototype.idf * @private * @memberOf Index */ elasticlunr.Index.prototype.idf = function (term, field) { var cacheKey = "@" + field + '/' + term; if (Object.prototype.hasOwnProperty.call(this._idfCache, cacheKey)) return this._idfCache[cacheKey]; var df = this.index[field].getDocFreq(term); var idf = 1 + Math.log(this.documentStore.length / (df + 1)); this._idfCache[cacheKey] = idf; return idf; }; /** * get fields of current index instance * * @return {Array} */ elasticlunr.Index.prototype.getFields = function () { return this._fields.slice(); }; /** * Searches the index using the passed query. * Queries should be a string, multiple words are allowed. * * If config is null, will search all fields defaultly, and lead to OR based query. 
* If config is specified, will search specified with query time boosting. * * All query tokens are passed through the same pipeline that document tokens * are passed through, so any language processing involved will be run on every * query term. * * Each query term is expanded, so that the term 'he' might be expanded to * 'hello' and 'help' if those terms were already included in the index. * * Matching documents are returned as an array of objects, each object contains * the matching document ref, as set for this index, and the similarity score * for this document against the query. * * @param {String} query The query to search the index with. * @param {JSON} userConfig The user query config, JSON format. * @return {Object} * @see Index.prototype.idf * @see Index.prototype.documentVector * @memberOf Index */ elasticlunr.Index.prototype.search = function (query, userConfig) { if (!query) return []; var configStr = null; if (userConfig != null) { configStr = JSON.stringify(userConfig); } var config = new elasticlunr.Configuration(configStr, this.getFields()).get(); var queryTokens = this.pipeline.run(elasticlunr.tokenizer(query)); var queryResults = {}; for (var field in config) { var fieldSearchResults = this.fieldSearch(queryTokens, field, config); var fieldBoost = config[field].boost; for (var docRef in fieldSearchResults) { fieldSearchResults[docRef] = fieldSearchResults[docRef] * fieldBoost; } for (var docRef in fieldSearchResults) { if (docRef in queryResults) { queryResults[docRef] += fieldSearchResults[docRef]; } else { queryResults[docRef] = fieldSearchResults[docRef]; } } } var results = []; for (var docRef in queryResults) { results.push({ref: docRef, score: queryResults[docRef]}); } results.sort(function (a, b) { return b.score - a.score; }); return results; }; /** * search queryTokens in specified field. * * @param {Array} queryTokens The query tokens to query in this field. * @param {String} field Field to query in. * @param {elasticlunr.Configuration} config The user query config, JSON format. * @return {Object} */ elasticlunr.Index.prototype.fieldSearch = function (queryTokens, fieldName, config) { var booleanType = config[fieldName].bool; var expand = config[fieldName].expand; var boost = config[fieldName].boost; var scores = null; var docTokens = {}; // Do nothing if the boost is 0 if (boost === 0) { return; } queryTokens.forEach(function (token) { var tokens = [token]; if (expand == true) { tokens = this.index[fieldName].expandToken(token); } // Consider every query token in turn. If expanded, each query token // corresponds to a set of tokens, which is all tokens in the // index matching the pattern queryToken* . // For the set of tokens corresponding to a query token, find and score // all matching documents. Store those scores in queryTokenScores, // keyed by docRef. // Then, depending on the value of booleanType, combine the scores // for this query token with previous scores. If booleanType is OR, // then merge the scores by summing into the accumulated total, adding // new document scores are required (effectively a union operator). // If booleanType is AND, accumulate scores only if the document // has previously been scored by another query token (an intersection // operation0. // Furthermore, since when booleanType is AND, additional // query tokens can't add new documents to the result set, use the // current document set to limit the processing of each new query // token for efficiency (i.e., incremental intersection). 
var queryTokenScores = {}; tokens.forEach(function (key) { var docs = this.index[fieldName].getDocs(key); var idf = this.idf(key, fieldName); if (scores && booleanType == 'AND') { // special case, we can rule out documents that have been // already been filtered out because they weren't scored // by previous query token passes. var filteredDocs = {}; for (var docRef in scores) { if (docRef in docs) { filteredDocs[docRef] = docs[docRef]; } } docs = filteredDocs; } // only record appeared token for retrieved documents for the // original token, not for expaned token. // beause for doing coordNorm for a retrieved document, coordNorm only care how many // query token appear in that document. // so expanded token should not be added into docTokens, if added, this will pollute the // coordNorm if (key == token) { this.fieldSearchStats(docTokens, key, docs); } for (var docRef in docs) { var tf = this.index[fieldName].getTermFrequency(key, docRef); var fieldLength = this.documentStore.getFieldLength(docRef, fieldName); var fieldLengthNorm = 1; if (fieldLength != 0) { fieldLengthNorm = 1 / Math.sqrt(fieldLength); } var penality = 1; if (key != token) { // currently I'm not sure if this penality is enough, // need to do verification penality = (1 - (key.length - token.length) / key.length) * 0.15; } var score = tf * idf * fieldLengthNorm * penality; if (docRef in queryTokenScores) { queryTokenScores[docRef] += score; } else { queryTokenScores[docRef] = score; } } }, this); scores = this.mergeScores(scores, queryTokenScores, booleanType); }, this); scores = this.coordNorm(scores, docTokens, queryTokens.length); return scores; }; /** * Merge the scores from one set of tokens into an accumulated score table. * Exact operation depends on the op parameter. If op is 'AND', then only the * intersection of the two score lists is retained. Otherwise, the union of * the two score lists is returned. For internal use only. * * @param {Object} bool accumulated scores. Should be null on first call. * @param {String} scores new scores to merge into accumScores. * @param {Object} op merge operation (should be 'AND' or 'OR'). * */ elasticlunr.Index.prototype.mergeScores = function (accumScores, scores, op) { if (!accumScores) { return scores; } if (op == 'AND') { var intersection = {}; for (var docRef in scores) { if (docRef in accumScores) { intersection[docRef] = accumScores[docRef] + scores[docRef]; } } return intersection; } else { for (var docRef in scores) { if (docRef in accumScores) { accumScores[docRef] += scores[docRef]; } else { accumScores[docRef] = scores[docRef]; } } return accumScores; } }; /** * Record the occuring query token of retrieved doc specified by doc field. * Only for inner user. * * @param {Object} docTokens a data structure stores which token appears in the retrieved doc. * @param {String} token query token * @param {Object} docs the retrieved documents of the query token * */ elasticlunr.Index.prototype.fieldSearchStats = function (docTokens, token, docs) { for (var doc in docs) { if (doc in docTokens) { docTokens[doc].push(token); } else { docTokens[doc] = [token]; } } }; /** * coord norm the score of a doc. * if a doc contain more query tokens, then the score will larger than the doc * contains less query tokens. * * only for inner use. 
* * @param {Object} results first results * @param {Object} docs field search results of a token * @param {Integer} n query token number * @return {Object} */ elasticlunr.Index.prototype.coordNorm = function (scores, docTokens, n) { for (var doc in scores) { if (!(doc in docTokens)) continue; var tokens = docTokens[doc].length; scores[doc] = scores[doc] * tokens / n; } return scores; }; /** * Returns a representation of the index ready for serialisation. * * @return {Object} * @memberOf Index */ elasticlunr.Index.prototype.toJSON = function () { var indexJson = {}; this._fields.forEach(function (field) { indexJson[field] = this.index[field].toJSON(); }, this); return { version: elasticlunr.version, fields: this._fields, ref: this._ref, documentStore: this.documentStore.toJSON(), index: indexJson, pipeline: this.pipeline.toJSON() }; }; /** * Applies a plugin to the current index. * * A plugin is a function that is called with the index as its context. * Plugins can be used to customise or extend the behaviour the index * in some way. A plugin is just a function, that encapsulated the custom * behaviour that should be applied to the index. * * The plugin function will be called with the index as its argument, additional * arguments can also be passed when calling use. The function will be called * with the index as its context. * * Example: * * var myPlugin = function (idx, arg1, arg2) { * // `this` is the index to be extended * // apply any extensions etc here. * } * * var idx = elasticlunr(function () { * this.use(myPlugin, 'arg1', 'arg2') * }) * * @param {Function} plugin The plugin to apply. * @memberOf Index */ elasticlunr.Index.prototype.use = function (plugin) { var args = Array.prototype.slice.call(arguments, 1); args.unshift(this); plugin.apply(this, args); }; /*! * elasticlunr.DocumentStore * Copyright (C) 2016 Wei Song */ /** * elasticlunr.DocumentStore is a simple key-value document store used for storing sets of tokens for * documents stored in index. * * elasticlunr.DocumentStore store original JSON format documents that you could build search snippet by this original JSON document. * * user could choose whether original JSON format document should be store, if no configuration then document will be stored defaultly. * If user care more about the index size, user could select not store JSON documents, then this will has some defects, such as user * could not use JSON document to generate snippets of search results. * * @param {Boolean} save If the original JSON document should be stored. * @constructor * @module */ elasticlunr.DocumentStore = function (save) { if (save === null || save === undefined) { this._save = true; } else { this._save = save; } this.docs = {}; this.docInfo = {}; this.length = 0; }; /** * Loads a previously serialised document store * * @param {Object} serialisedData The serialised document store to load. * @return {elasticlunr.DocumentStore} */ elasticlunr.DocumentStore.load = function (serialisedData) { var store = new this; store.length = serialisedData.length; store.docs = serialisedData.docs; store.docInfo = serialisedData.docInfo; store._save = serialisedData.save; return store; }; /** * check if current instance store the original doc * * @return {Boolean} */ elasticlunr.DocumentStore.prototype.isDocStored = function () { return this._save; }; /** * Stores the given doc in the document store against the given id. * If docRef already exist, then update doc. 
* * Document is store by original JSON format, then you could use original document to generate search snippets. * * @param {Integer|String} docRef The key used to store the JSON format doc. * @param {Object} doc The JSON format doc. */ elasticlunr.DocumentStore.prototype.addDoc = function (docRef, doc) { if (!this.hasDoc(docRef)) this.length++; if (this._save === true) { this.docs[docRef] = clone(doc); } else { this.docs[docRef] = null; } }; /** * Retrieves the JSON doc from the document store for a given key. * * If docRef not found, return null. * If user set not storing the documents, return null. * * @param {Integer|String} docRef The key to lookup and retrieve from the document store. * @return {Object} * @memberOf DocumentStore */ elasticlunr.DocumentStore.prototype.getDoc = function (docRef) { if (this.hasDoc(docRef) === false) return null; return this.docs[docRef]; }; /** * Checks whether the document store contains a key (docRef). * * @param {Integer|String} docRef The id to look up in the document store. * @return {Boolean} * @memberOf DocumentStore */ elasticlunr.DocumentStore.prototype.hasDoc = function (docRef) { return docRef in this.docs; }; /** * Removes the value for a key in the document store. * * @param {Integer|String} docRef The id to remove from the document store. * @memberOf DocumentStore */ elasticlunr.DocumentStore.prototype.removeDoc = function (docRef) { if (!this.hasDoc(docRef)) return; delete this.docs[docRef]; delete this.docInfo[docRef]; this.length--; }; /** * Add field length of a document's field tokens from pipeline results. * The field length of a document is used to do field length normalization even without the original JSON document stored. * * @param {Integer|String} docRef document's id or reference * @param {String} fieldName field name * @param {Integer} length field length */ elasticlunr.DocumentStore.prototype.addFieldLength = function (docRef, fieldName, length) { if (docRef === null || docRef === undefined) return; if (this.hasDoc(docRef) == false) return; if (!this.docInfo[docRef]) this.docInfo[docRef] = {}; this.docInfo[docRef][fieldName] = length; }; /** * Update field length of a document's field tokens from pipeline results. * The field length of a document is used to do field length normalization even without the original JSON document stored. * * @param {Integer|String} docRef document's id or reference * @param {String} fieldName field name * @param {Integer} length field length */ elasticlunr.DocumentStore.prototype.updateFieldLength = function (docRef, fieldName, length) { if (docRef === null || docRef === undefined) return; if (this.hasDoc(docRef) == false) return; this.addFieldLength(docRef, fieldName, length); }; /** * get field length of a document by docRef * * @param {Integer|String} docRef document id or reference * @param {String} fieldName field name * @return {Integer} field length */ elasticlunr.DocumentStore.prototype.getFieldLength = function (docRef, fieldName) { if (docRef === null || docRef === undefined) return 0; if (!(docRef in this.docs)) return 0; if (!(fieldName in this.docInfo[docRef])) return 0; return this.docInfo[docRef][fieldName]; }; /** * Returns a JSON representation of the document store used for serialisation. 
* * @return {Object} JSON format * @memberOf DocumentStore */ elasticlunr.DocumentStore.prototype.toJSON = function () { return { docs: this.docs, docInfo: this.docInfo, length: this.length, save: this._save }; }; /** * Cloning object * * @param {Object} object in JSON format * @return {Object} copied object */ function clone(obj) { if (null === obj || "object" !== typeof obj) return obj; var copy = obj.constructor(); for (var attr in obj) { if (obj.hasOwnProperty(attr)) copy[attr] = obj[attr]; } return copy; } /*! * elasticlunr.stemmer * Copyright (C) 2016 Oliver Nightingale * Copyright (C) 2016 Wei Song * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt */ /** * elasticlunr.stemmer is an english language stemmer, this is a JavaScript * implementation of the PorterStemmer taken from http://tartarus.org/~martin * * @module * @param {String} str The string to stem * @return {String} * @see elasticlunr.Pipeline */ elasticlunr.stemmer = (function(){ var step2list = { "ational" : "ate", "tional" : "tion", "enci" : "ence", "anci" : "ance", "izer" : "ize", "bli" : "ble", "alli" : "al", "entli" : "ent", "eli" : "e", "ousli" : "ous", "ization" : "ize", "ation" : "ate", "ator" : "ate", "alism" : "al", "iveness" : "ive", "fulness" : "ful", "ousness" : "ous", "aliti" : "al", "iviti" : "ive", "biliti" : "ble", "logi" : "log" }, step3list = { "icate" : "ic", "ative" : "", "alize" : "al", "iciti" : "ic", "ical" : "ic", "ful" : "", "ness" : "" }, c = "[^aeiou]", // consonant v = "[aeiouy]", // vowel C = c + "[^aeiouy]*", // consonant sequence V = v + "[aeiou]*", // vowel sequence mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0 meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1 mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1 s_v = "^(" + C + ")?" 
+ v; // vowel in stem var re_mgr0 = new RegExp(mgr0); var re_mgr1 = new RegExp(mgr1); var re_meq1 = new RegExp(meq1); var re_s_v = new RegExp(s_v); var re_1a = /^(.+?)(ss|i)es$/; var re2_1a = /^(.+?)([^s])s$/; var re_1b = /^(.+?)eed$/; var re2_1b = /^(.+?)(ed|ing)$/; var re_1b_2 = /.$/; var re2_1b_2 = /(at|bl|iz)$/; var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$"); var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$"); var re_1c = /^(.+?[^aeiou])y$/; var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; var re2_4 = /^(.+?)(s|t)(ion)$/; var re_5 = /^(.+?)e$/; var re_5_1 = /ll$/; var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$"); var porterStemmer = function porterStemmer(w) { var stem, suffix, firstch, re, re2, re3, re4; if (w.length < 3) { return w; } firstch = w.substr(0,1); if (firstch == "y") { w = firstch.toUpperCase() + w.substr(1); } // Step 1a re = re_1a re2 = re2_1a; if (re.test(w)) { w = w.replace(re,"$1$2"); } else if (re2.test(w)) { w = w.replace(re2,"$1$2"); } // Step 1b re = re_1b; re2 = re2_1b; if (re.test(w)) { var fp = re.exec(w); re = re_mgr0; if (re.test(fp[1])) { re = re_1b_2; w = w.replace(re,""); } } else if (re2.test(w)) { var fp = re2.exec(w); stem = fp[1]; re2 = re_s_v; if (re2.test(stem)) { w = stem; re2 = re2_1b_2; re3 = re3_1b_2; re4 = re4_1b_2; if (re2.test(w)) { w = w + "e"; } else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); } else if (re4.test(w)) { w = w + "e"; } } } // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say) re = re_1c; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; w = stem + "i"; } // Step 2 re = re_2; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; suffix = fp[2]; re = re_mgr0; if (re.test(stem)) { w = stem + step2list[suffix]; } } // Step 3 re = re_3; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; suffix = fp[2]; re = re_mgr0; if (re.test(stem)) { w = stem + step3list[suffix]; } } // Step 4 re = re_4; re2 = re2_4; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = re_mgr1; if (re.test(stem)) { w = stem; } } else if (re2.test(w)) { var fp = re2.exec(w); stem = fp[1] + fp[2]; re2 = re_mgr1; if (re2.test(stem)) { w = stem; } } // Step 5 re = re_5; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = re_mgr1; re2 = re_meq1; re3 = re3_5; if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) { w = stem; } } re = re_5_1; re2 = re_mgr1; if (re.test(w) && re2.test(w)) { re = re_1b_2; w = w.replace(re,""); } // and turn initial Y back to y if (firstch == "y") { w = firstch.toLowerCase() + w.substr(1); } return w; }; return porterStemmer; })(); elasticlunr.Pipeline.registerFunction(elasticlunr.stemmer, 'stemmer'); /*! * elasticlunr.stopWordFilter * Copyright (C) 2016 Oliver Nightingale * Copyright (C) 2016 Wei Song */ /** * elasticlunr.stopWordFilter is an English language stop words filter, any words * contained in the stop word list will not be passed through the filter. * * This is intended to be used in the Pipeline. If the token does not pass the * filter then undefined will be returned. * Currently this StopwordFilter using dictionary to do O(1) time complexity stop word filtering. 
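 *
 * @example
 * // Illustrative only: the tokens below are made up. Stop words are dropped
 * // (the filter returns undefined); other tokens pass through unchanged.
 * elasticlunr.stopWordFilter('the');      // -> undefined
 * elasticlunr.stopWordFilter('oracle');   // -> 'oracle'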
 *
 * @module
 * @param {String} token The token to pass through the filter
 * @return {String}
 * @see elasticlunr.Pipeline
 */
elasticlunr.stopWordFilter = function (token) {
  if (token && elasticlunr.stopWordFilter.stopWords[token] !== true) {
    return token;
  }
};

/**
 * Removes the predefined stop words.
 * Users who want to supply their own stop word list can call this function
 * first to delete all predefined stop words.
 *
 * @return {null}
 */
elasticlunr.clearStopWords = function () {
  elasticlunr.stopWordFilter.stopWords = {};
};

/**
 * Adds customized stop words to the stop word list.
 *
 * @param {Array} words customized stop words
 * @return {null}
 */
elasticlunr.addStopWords = function (words) {
  if (words == null || Array.isArray(words) === false) return;

  words.forEach(function (word) {
    elasticlunr.stopWordFilter.stopWords[word] = true;
  }, this);
};

/**
 * Resets the stop word list to the default stop words.
 *
 * @return {null}
 */
elasticlunr.resetStopWords = function () {
  elasticlunr.stopWordFilter.stopWords = elasticlunr.defaultStopWords;
};

elasticlunr.defaultStopWords = {
  "": true, "a": true, "able": true, "about": true, "across": true, "after": true,
  "all": true, "almost": true, "also": true, "am": true, "among": true, "an": true,
  "and": true, "any": true, "are": true, "as": true, "at": true, "be": true,
  "because": true, "been": true, "but": true, "by": true, "can": true, "cannot": true,
  "could": true, "dear": true, "did": true, "do": true, "does": true, "either": true,
  "else": true, "ever": true, "every": true, "for": true, "from": true, "get": true,
  "got": true, "had": true, "has": true, "have": true, "he": true, "her": true,
  "hers": true, "him": true, "his": true, "how": true, "however": true, "i": true,
  "if": true, "in": true, "into": true, "is": true, "it": true, "its": true,
  "just": true, "least": true, "let": true, "like": true, "likely": true, "may": true,
  "me": true, "might": true, "most": true, "must": true, "my": true, "neither": true,
  "no": true, "nor": true, "not": true, "of": true, "off": true, "often": true,
  "on": true, "only": true, "or": true, "other": true, "our": true, "own": true,
  "rather": true, "said": true, "say": true, "says": true, "she": true, "should": true,
  "since": true, "so": true, "some": true, "than": true, "that": true, "the": true,
  "their": true, "them": true, "then": true, "there": true, "these": true, "they": true,
  "this": true, "tis": true, "to": true, "too": true, "twas": true, "us": true,
  "wants": true, "was": true, "we": true, "were": true, "what": true, "when": true,
  "where": true, "which": true, "while": true, "who": true, "whom": true, "why": true,
  "will": true, "with": true, "would": true, "yet": true, "you": true, "your": true
};

elasticlunr.stopWordFilter.stopWords = elasticlunr.defaultStopWords;

elasticlunr.Pipeline.registerFunction(elasticlunr.stopWordFilter, 'stopWordFilter');
/*!
 * elasticlunr.trimmer
 * Copyright (C) 2016 Oliver Nightingale
 * Copyright (C) 2016 Wei Song
 */

/**
 * elasticlunr.trimmer is a pipeline function for trimming non-word
 * characters from the beginning and end of tokens before they
 * enter the index.
 *
 * This implementation may not work correctly for non-Latin
 * characters and should either be removed or adapted for use
 * with languages that use non-Latin characters.
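 *
 * @example
 * // Illustrative only: leading and trailing punctuation is stripped,
 * // inner characters are left untouched.
 * elasticlunr.trimmer('  "hello!" ');   // -> 'hello'
 * elasticlunr.trimmer('don\'t');        // -> 'don\'t'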
* * @module * @param {String} token The token to pass through the filter * @return {String} * @see elasticlunr.Pipeline */ elasticlunr.trimmer = function (token) { if (token === null || token === undefined) { throw new Error('token should not be undefined'); } return token .replace(/^\W+/, '') .replace(/\W+$/, ''); }; elasticlunr.Pipeline.registerFunction(elasticlunr.trimmer, 'trimmer'); /*! * elasticlunr.InvertedIndex * Copyright (C) 2016 Wei Song * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt */ /** * elasticlunr.InvertedIndex is used for efficiently storing and * lookup of documents that contain a given token. * * @constructor */ elasticlunr.InvertedIndex = function () { this.root = { docs: {}, df: 0 }; }; /** * Loads a previously serialised inverted index. * * @param {Object} serialisedData The serialised inverted index to load. * @return {elasticlunr.InvertedIndex} */ elasticlunr.InvertedIndex.load = function (serialisedData) { var idx = new this; idx.root = serialisedData.root; return idx; }; /** * Adds a {token: tokenInfo} pair to the inverted index. * If the token already exist, then update the tokenInfo. * * tokenInfo format: { ref: 1, tf: 2} * tokenInfor should contains the document's ref and the tf(token frequency) of that token in * the document. * * By default this function starts at the root of the current inverted index, however * it can start at any node of the inverted index if required. * * @param {String} token * @param {Object} tokenInfo format: { ref: 1, tf: 2} * @param {Object} root An optional node at which to start looking for the * correct place to enter the doc, by default the root of this elasticlunr.InvertedIndex * is used. * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.addToken = function (token, tokenInfo, root) { var root = root || this.root, idx = 0; while (idx <= token.length - 1) { var key = token[idx]; if (!(key in root)) root[key] = {docs: {}, df: 0}; idx += 1; root = root[key]; } var docRef = tokenInfo.ref; if (!root.docs[docRef]) { // if this doc not exist, then add this doc root.docs[docRef] = {tf: tokenInfo.tf}; root.df += 1; } else { // if this doc already exist, then update tokenInfo root.docs[docRef] = {tf: tokenInfo.tf}; } }; /** * Checks whether a token is in this elasticlunr.InvertedIndex. * * * @param {String} token The token to be checked * @return {Boolean} * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.hasToken = function (token) { if (!token) return false; var node = this.root; for (var i = 0; i < token.length; i++) { if (!node[token[i]]) return false; node = node[token[i]]; } return true; }; /** * Retrieve a node from the inverted index for a given token. * If token not found in this InvertedIndex, return null. * * * @param {String} token The token to get the node for. * @return {Object} * @see InvertedIndex.prototype.get * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.getNode = function (token) { if (!token) return null; var node = this.root; for (var i = 0; i < token.length; i++) { if (!node[token[i]]) return null; node = node[token[i]]; } return node; }; /** * Retrieve the documents of a given token. * If token not found, return {}. * * * @param {String} token The token to get the documents for. * @return {Object} * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.getDocs = function (token) { var node = this.getNode(token); if (node == null) { return {}; } return node.docs; }; /** * Retrieve term frequency of given token in given docRef. 
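 *
 * @example
 * // Illustrative only: the token and docRef below are made up.
 * var invIdx = new elasticlunr.InvertedIndex();
 * invIdx.addToken('book', {ref: 1, tf: 2});
 * invIdx.getTermFrequency('book', 1);   // -> 2
 * invIdx.getDocFreq('book');            // -> 1
 *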
* If token or docRef not found, return 0. * * * @param {String} token The token to get the documents for. * @param {String|Integer} docRef * @return {Integer} * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.getTermFrequency = function (token, docRef) { var node = this.getNode(token); if (node == null) { return 0; } if (!(docRef in node.docs)) { return 0; } return node.docs[docRef].tf; }; /** * Retrieve the document frequency of given token. * If token not found, return 0. * * * @param {String} token The token to get the documents for. * @return {Object} * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.getDocFreq = function (token) { var node = this.getNode(token); if (node == null) { return 0; } return node.df; }; /** * Remove the document identified by document's ref from the token in the inverted index. * * * @param {String} token Remove the document from which token. * @param {String} ref The ref of the document to remove from given token. * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.removeToken = function (token, ref) { if (!token) return; var node = this.getNode(token); if (node == null) return; if (ref in node.docs) { delete node.docs[ref]; node.df -= 1; } }; /** * Find all the possible suffixes of given token using tokens currently in the inverted index. * If token not found, return empty Array. * * @param {String} token The token to expand. * @return {Array} * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.expandToken = function (token, memo, root) { if (token == null || token == '') return []; var memo = memo || []; if (root == void 0) { root = this.getNode(token); if (root == null) return memo; } if (root.df > 0) memo.push(token); for (var key in root) { if (key === 'docs') continue; if (key === 'df') continue; this.expandToken(token + key, memo, root[key]); } return memo; }; /** * Returns a representation of the inverted index ready for serialisation. * * @return {Object} * @memberOf InvertedIndex */ elasticlunr.InvertedIndex.prototype.toJSON = function () { return { root: this.root }; }; /*! * elasticlunr.Configuration * Copyright (C) 2016 Wei Song */ /** * elasticlunr.Configuration is used to analyze the user search configuration. * * By elasticlunr.Configuration user could set query-time boosting, boolean model in each field. * * Currently configuration supports: * 1. query-time boosting, user could set how to boost each field. * 2. boolean model chosing, user could choose which boolean model to use for each field. * 3. token expandation, user could set token expand to True to improve Recall. Default is False. * * Query time boosting must be configured by field category, "boolean" model could be configured * by both field category or globally as the following example. Field configuration for "boolean" * will overwrite global configuration. * Token expand could be configured both by field category or golbally. Local field configuration will * overwrite global configuration. 
* * configuration example: * { * fields:{ * title: {boost: 2}, * body: {boost: 1} * }, * bool: "OR" * } * * "bool" field configuation overwrite global configuation example: * { * fields:{ * title: {boost: 2, bool: "AND"}, * body: {boost: 1} * }, * bool: "OR" * } * * "expand" example: * { * fields:{ * title: {boost: 2, bool: "AND"}, * body: {boost: 1} * }, * bool: "OR", * expand: true * } * * "expand" example for field category: * { * fields:{ * title: {boost: 2, bool: "AND", expand: true}, * body: {boost: 1} * }, * bool: "OR" * } * * setting the boost to 0 ignores the field (this will only search the title): * { * fields:{ * title: {boost: 1}, * body: {boost: 0} * } * } * * then, user could search with configuration to do query-time boosting. * idx.search('oracle database', {fields: {title: {boost: 2}, body: {boost: 1}}}); * * * @constructor * * @param {String} config user configuration * @param {Array} fields fields of index instance * @module */ elasticlunr.Configuration = function (config, fields) { var config = config || ''; if (fields == undefined || fields == null) { throw new Error('fields should not be null'); } this.config = {}; var userConfig; try { userConfig = JSON.parse(config); this.buildUserConfig(userConfig, fields); } catch (error) { elasticlunr.utils.warn('user configuration parse failed, will use default configuration'); this.buildDefaultConfig(fields); } }; /** * Build default search configuration. * * @param {Array} fields fields of index instance */ elasticlunr.Configuration.prototype.buildDefaultConfig = function (fields) { this.reset(); fields.forEach(function (field) { this.config[field] = { boost: 1, bool: "OR", expand: false }; }, this); }; /** * Build user configuration. * * @param {JSON} config User JSON configuratoin * @param {Array} fields fields of index instance */ elasticlunr.Configuration.prototype.buildUserConfig = function (config, fields) { var global_bool = "OR"; var global_expand = false; this.reset(); if ('bool' in config) { global_bool = config['bool'] || global_bool; } if ('expand' in config) { global_expand = config['expand'] || global_expand; } if ('fields' in config) { for (var field in config['fields']) { if (fields.indexOf(field) > -1) { var field_config = config['fields'][field]; var field_expand = global_expand; if (field_config.expand != undefined) { field_expand = field_config.expand; } this.config[field] = { boost: (field_config.boost || field_config.boost === 0) ? field_config.boost : 1, bool: field_config.bool || global_bool, expand: field_expand }; } else { elasticlunr.utils.warn('field name in user configuration not found in index instance fields'); } } } else { this.addAllFields2UserConfig(global_bool, global_expand, fields); } }; /** * Add all fields to user search configuration. * * @param {String} bool Boolean model * @param {String} expand Expand model * @param {Array} fields fields of index instance */ elasticlunr.Configuration.prototype.addAllFields2UserConfig = function (bool, expand, fields) { fields.forEach(function (field) { this.config[field] = { boost: 1, bool: bool, expand: expand }; }, this); }; /** * get current user configuration */ elasticlunr.Configuration.prototype.get = function () { return this.config; }; /** * reset user search configuration. */ elasticlunr.Configuration.prototype.reset = function () { this.config = {}; }; /** * sorted_set.js is added only to make elasticlunr.js compatible with lunr-languages. 
* if elasticlunr.js support different languages by default, this will make elasticlunr.js * much bigger that not good for browser usage. * */ /*! * lunr.SortedSet * Copyright (C) 2016 Oliver Nightingale */ /** * lunr.SortedSets are used to maintain an array of uniq values in a sorted * order. * * @constructor */ lunr.SortedSet = function () { this.length = 0 this.elements = [] } /** * Loads a previously serialised sorted set. * * @param {Array} serialisedData The serialised set to load. * @returns {lunr.SortedSet} * @memberOf SortedSet */ lunr.SortedSet.load = function (serialisedData) { var set = new this set.elements = serialisedData set.length = serialisedData.length return set } /** * Inserts new items into the set in the correct position to maintain the * order. * * @param {Object} The objects to add to this set. * @memberOf SortedSet */ lunr.SortedSet.prototype.add = function () { var i, element for (i = 0; i < arguments.length; i++) { element = arguments[i] if (~this.indexOf(element)) continue this.elements.splice(this.locationFor(element), 0, element) } this.length = this.elements.length } /** * Converts this sorted set into an array. * * @returns {Array} * @memberOf SortedSet */ lunr.SortedSet.prototype.toArray = function () { return this.elements.slice() } /** * Creates a new array with the results of calling a provided function on every * element in this sorted set. * * Delegates to Array.prototype.map and has the same signature. * * @param {Function} fn The function that is called on each element of the * set. * @param {Object} ctx An optional object that can be used as the context * for the function fn. * @returns {Array} * @memberOf SortedSet */ lunr.SortedSet.prototype.map = function (fn, ctx) { return this.elements.map(fn, ctx) } /** * Executes a provided function once per sorted set element. * * Delegates to Array.prototype.forEach and has the same signature. * * @param {Function} fn The function that is called on each element of the * set. * @param {Object} ctx An optional object that can be used as the context * @memberOf SortedSet * for the function fn. */ lunr.SortedSet.prototype.forEach = function (fn, ctx) { return this.elements.forEach(fn, ctx) } /** * Returns the index at which a given element can be found in the * sorted set, or -1 if it is not present. * * @param {Object} elem The object to locate in the sorted set. * @returns {Number} * @memberOf SortedSet */ lunr.SortedSet.prototype.indexOf = function (elem) { var start = 0, end = this.elements.length, sectionLength = end - start, pivot = start + Math.floor(sectionLength / 2), pivotElem = this.elements[pivot] while (sectionLength > 1) { if (pivotElem === elem) return pivot if (pivotElem < elem) start = pivot if (pivotElem > elem) end = pivot sectionLength = end - start pivot = start + Math.floor(sectionLength / 2) pivotElem = this.elements[pivot] } if (pivotElem === elem) return pivot return -1 } /** * Returns the position within the sorted set that an element should be * inserted at to maintain the current order of the set. * * This function assumes that the element to search for does not already exist * in the sorted set. 
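 *
 * @example
 * // Illustrative only: positions in a set containing [1, 5, 9].
 * var set = new lunr.SortedSet()
 * set.add(1, 5, 9)
 * set.locationFor(6)   // -> 2 (between 5 and 9)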
* * @param {Object} elem The elem to find the position for in the set * @returns {Number} * @memberOf SortedSet */ lunr.SortedSet.prototype.locationFor = function (elem) { var start = 0, end = this.elements.length, sectionLength = end - start, pivot = start + Math.floor(sectionLength / 2), pivotElem = this.elements[pivot] while (sectionLength > 1) { if (pivotElem < elem) start = pivot if (pivotElem > elem) end = pivot sectionLength = end - start pivot = start + Math.floor(sectionLength / 2) pivotElem = this.elements[pivot] } if (pivotElem > elem) return pivot if (pivotElem < elem) return pivot + 1 } /** * Creates a new lunr.SortedSet that contains the elements in the intersection * of this set and the passed set. * * @param {lunr.SortedSet} otherSet The set to intersect with this set. * @returns {lunr.SortedSet} * @memberOf SortedSet */ lunr.SortedSet.prototype.intersect = function (otherSet) { var intersectSet = new lunr.SortedSet, i = 0, j = 0, a_len = this.length, b_len = otherSet.length, a = this.elements, b = otherSet.elements while (true) { if (i > a_len - 1 || j > b_len - 1) break if (a[i] === b[j]) { intersectSet.add(a[i]) i++, j++ continue } if (a[i] < b[j]) { i++ continue } if (a[i] > b[j]) { j++ continue } }; return intersectSet } /** * Makes a copy of this set * * @returns {lunr.SortedSet} * @memberOf SortedSet */ lunr.SortedSet.prototype.clone = function () { var clone = new lunr.SortedSet clone.elements = this.toArray() clone.length = clone.elements.length return clone } /** * Creates a new lunr.SortedSet that contains the elements in the union * of this set and the passed set. * * @param {lunr.SortedSet} otherSet The set to union with this set. * @returns {lunr.SortedSet} * @memberOf SortedSet */ lunr.SortedSet.prototype.union = function (otherSet) { var longSet, shortSet, unionSet if (this.length >= otherSet.length) { longSet = this, shortSet = otherSet } else { longSet = otherSet, shortSet = this } unionSet = longSet.clone() for(var i = 0, shortSetElements = shortSet.toArray(); i < shortSetElements.length; i++){ unionSet.add(shortSetElements[i]) } return unionSet } /** * Returns a representation of the sorted set ready for serialisation. * * @returns {Array} * @memberOf SortedSet */ lunr.SortedSet.prototype.toJSON = function () { return this.toArray() } /** * export the module via AMD, CommonJS or as a browser global * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js */ ;(function (root, factory) { if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. define(factory) } else if (typeof exports === 'object') { /** * Node. Does not work with strict CommonJS, but * only CommonJS-like enviroments that support module.exports, * like Node. */ module.exports = factory() } else { // Browser globals (root is window) root.elasticlunr = factory() } }(this, function () { /** * Just return a value to define the module export. * This example returns an object, but the module * can return a function as the exported value. */ return elasticlunr })) })();
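/*
 * End-to-end usage sketch (illustrative only, never executed as part of the
 * library): builds a small index and searches it with query-time boosting.
 * The field names, documents and query below are made up for the example;
 * elasticlunr here refers to the factory exported above.
 */
function elasticlunrUsageExample() {
  var idx = elasticlunr(function () {
    this.setRef('id');
    this.addField('title');
    this.addField('body');
  });

  idx.addDoc({id: 1, title: 'Oracle database', body: 'Notes on tuning the Oracle database.'});
  idx.addDoc({id: 2, title: 'MySQL', body: 'Notes on MySQL replication and backups.'});

  // Query-time boosting, boolean model and token expansion via the search config.
  return idx.search('oracle database', {
    fields: {
      title: {boost: 2},
      body: {boost: 1}
    },
    bool: 'OR',
    expand: true
  });
}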