/*
 * Copyright (C) 2003-2009 The Music Player Daemon Project
 * http://www.musicpd.org
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
|
|
|
|
|
|
|
|
#include "tag.h"
|
2008-09-07 13:28:01 +02:00
|
|
|
#include "tag_internal.h"
|
tag: added a pool for tag items
The new source tag_pool.c manages a pool of reference counted tag_item
objects. This is used to merge tag items of the same type and value,
saving lots of memory. Formerly, only the value itself was pooled,
wasting memory for all the pointers and tag_item structs.
The following results were measured with massif. Started MPD on
amd64, typed "mpc", no song being played. My music database contains
35k tagged songs. The results are what massif reports as "peak".
0.13.2: total 14,131,392; useful 11,408,972; extra 2,722,420
eric: total 18,370,696; useful 15,648,182; extra 2,722,514
mk f34f694: total 15,833,952; useful 13,111,470; extra 2,722,482
mk now: total 12,837,632; useful 10,626,383; extra 2,211,249
This patch set saves 20% memory, and does a good job in reducing heap
fragmentation.
2008-08-29 09:38:37 +02:00
|
|
|
#include "tag_pool.h"
|
2004-10-05 19:16:26 +02:00
|
|
|
#include "conf.h"
|
2007-03-20 21:12:53 +01:00
|
|
|
#include "song.h"
|
2004-02-24 00:41:20 +01:00
|
|
|
|
2008-10-31 13:57:10 +01:00
|
|
|
#include <glib.h>
|
2008-10-08 10:49:29 +02:00
|
|
|
#include <assert.h>
|
2008-11-01 14:33:14 +01:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
2008-10-08 10:49:29 +02:00
|
|
|
|
2008-09-06 15:31:55 +02:00
|
|
|
/**
 * Maximum number of items managed in the bulk list; if it is
 * exceeded, we switch back to "normal" reallocation.
 */
#define BULK_MAX 64

/**
 * A static staging buffer used between tag_begin_add() and
 * tag_end_add(): items are collected here first so the tag's item
 * array is allocated only once, at its final size.  Only one tag may
 * use it at a time (see the "busy" debug flag).
 */
static struct {
#ifndef NDEBUG
	/* set while some tag is using the bulk list (debug builds only) */
	bool busy;
#endif
	struct tag_item *items[BULK_MAX];
} bulk;
|
|
|
|
|
2009-02-27 09:01:55 +01:00
|
|
|
/**
 * Human-readable names of all tag item types, indexed by enum
 * tag_type.  These strings are used when parsing the
 * "metadata_to_use" configuration setting (see tag_lib_init()).
 */
const char *tag_item_names[TAG_NUM_OF_ITEM_TYPES] = {
	"Artist",
	"Album",
	"AlbumArtist",
	"Title",
	"Track",
	"Name",
	"Genre",
	"Date",
	"Composer",
	"Performer",
	"Comment",
	"Disc",

	/* MusicBrainz tags from http://musicbrainz.org/doc/MusicBrainzTag */
	[TAG_MUSICBRAINZ_ARTISTID] = "MUSICBRAINZ_ARTISTID",
	[TAG_MUSICBRAINZ_ALBUMID] = "MUSICBRAINZ_ALBUMID",
	[TAG_MUSICBRAINZ_ALBUMARTISTID] = "MUSICBRAINZ_ALBUMARTISTID",
	[TAG_MUSICBRAINZ_TRACKID] = "MUSICBRAINZ_TRACKID",
};
|
|
|
|
|
2009-03-01 00:58:32 +01:00
|
|
|
/* per-type flags: true = this tag type is ignored by tag_add_item_n();
   configured from "metadata_to_use" in tag_lib_init() */
bool ignore_tag_items[TAG_NUM_OF_ITEM_TYPES];
|
2004-11-10 22:58:27 +01:00
|
|
|
|
2008-09-07 19:14:43 +02:00
|
|
|
static size_t items_size(const struct tag *tag)
|
|
|
|
{
|
2009-02-27 09:01:55 +01:00
|
|
|
return tag->num_items * sizeof(struct tag_item *);
|
2008-09-07 19:14:43 +02:00
|
|
|
}
|
|
|
|
|
2008-08-29 09:38:21 +02:00
|
|
|
/**
 * Initializes the tag library: parses the "metadata_to_use"
 * configuration setting and fills the ignore_tag_items[] array
 * accordingly.  Without that setting, all tag types except comments
 * are enabled.
 */
void tag_lib_init(void)
{
	const char *value;
	int quit = 0;
	char *temp;
	char *s;
	char *c;
	int i;

	/* parse the "metadata_to_use" config parameter below */

	/* ignore comments by default */
	ignore_tag_items[TAG_ITEM_COMMENT] = true;

	value = config_get_string(CONF_METADATA_TO_USE, NULL);
	if (value == NULL)
		/* no setting: keep the defaults (everything but
		   comments enabled) */
		return;

	/* a setting exists: start from "ignore everything" and enable
	   only the listed types below (memset with 1 per byte is fine
	   for a bool array) */
	memset(ignore_tag_items, true, TAG_NUM_OF_ITEM_TYPES);

	if (0 == g_ascii_strcasecmp(value, "none"))
		/* explicit "none": every tag type stays ignored */
		return;

	/* walk the comma separated list in a private copy; "c" points
	   at the start of the current token, "s" scans forward */
	temp = c = s = g_strdup(value);
	while (!quit) {
		if (*s == ',' || *s == '\0') {
			if (*s == '\0')
				quit = 1;
			/* terminate the current token in place */
			*s = '\0';
			for (i = 0; i < TAG_NUM_OF_ITEM_TYPES; i++) {
				if (g_ascii_strcasecmp(c, tag_item_names[i]) == 0) {
					ignore_tag_items[i] = false;
					break;
				}
			}
			/* non-empty token that matched no known name:
			   fatal configuration error */
			if (strlen(c) && i == TAG_NUM_OF_ITEM_TYPES) {
				g_error("error parsing metadata item \"%s\"",
					c);
			}
			s++;
			c = s;
		}
		s++;
	}

	g_free(temp);
}
|
|
|
|
|
2008-08-29 09:38:21 +02:00
|
|
|
struct tag *tag_new(void)
|
2006-07-20 18:02:40 +02:00
|
|
|
{
|
2008-11-01 14:33:14 +01:00
|
|
|
struct tag *ret = g_new(struct tag, 1);
|
2004-11-10 22:58:27 +01:00
|
|
|
ret->items = NULL;
|
2004-03-11 01:16:49 +01:00
|
|
|
ret->time = -1;
|
2009-02-27 09:01:55 +01:00
|
|
|
ret->num_items = 0;
|
2004-02-24 00:41:20 +01:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-02-27 09:02:13 +01:00
|
|
|
static void tag_delete_item(struct tag *tag, unsigned idx)
|
2006-07-20 18:02:40 +02:00
|
|
|
{
|
2009-02-27 09:01:55 +01:00
|
|
|
assert(idx < tag->num_items);
|
|
|
|
tag->num_items--;
|
2004-11-10 22:58:27 +01:00
|
|
|
|
2008-12-28 22:09:42 +01:00
|
|
|
g_mutex_lock(tag_pool_lock);
|
tag: added a pool for tag items
The new source tag_pool.c manages a pool of reference counted tag_item
objects. This is used to merge tag items of the same type and value,
saving lots of memory. Formerly, only the value itself was pooled,
wasting memory for all the pointers and tag_item structs.
The following results were measured with massif. Started MPD on
amd64, typed "mpc", no song being played. My music database contains
35k tagged songs. The results are what massif reports as "peak".
0.13.2: total 14,131,392; useful 11,408,972; extra 2,722,420
eric: total 18,370,696; useful 15,648,182; extra 2,722,514
mk f34f694: total 15,833,952; useful 13,111,470; extra 2,722,482
mk now: total 12,837,632; useful 10,626,383; extra 2,211,249
This patch set saves 20% memory, and does a good job in reducing heap
fragmentation.
2008-08-29 09:38:37 +02:00
|
|
|
tag_pool_put_item(tag->items[idx]);
|
2008-12-28 22:09:42 +01:00
|
|
|
g_mutex_unlock(tag_pool_lock);
|
2004-11-10 22:58:27 +01:00
|
|
|
|
2009-02-27 09:01:55 +01:00
|
|
|
if (tag->num_items - idx > 0) {
|
2008-01-26 13:46:21 +01:00
|
|
|
memmove(tag->items + idx, tag->items + idx + 1,
|
2009-02-27 09:01:55 +01:00
|
|
|
tag->num_items - idx);
|
2004-11-10 22:58:27 +01:00
|
|
|
}
|
|
|
|
|
2009-02-27 09:01:55 +01:00
|
|
|
if (tag->num_items > 0) {
|
2008-11-01 14:33:14 +01:00
|
|
|
tag->items = g_realloc(tag->items, items_size(tag));
|
2006-07-20 18:02:40 +02:00
|
|
|
} else {
|
2009-01-25 18:47:21 +01:00
|
|
|
g_free(tag->items);
|
2004-11-10 22:58:27 +01:00
|
|
|
tag->items = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-08-29 09:38:21 +02:00
|
|
|
/**
 * Removes all items of the specified type from the tag.
 */
void tag_clear_items_by_type(struct tag *tag, enum tag_type type)
{
	for (unsigned i = 0; i < tag->num_items; i++) {
		if (tag->items[i]->type == type) {
			tag_delete_item(tag, i);
			/* decrement since when just deleted this node:
			   the following items shifted down by one, so
			   re-examine the same index next iteration */
			i--;
		}
	}
}
|
|
|
|
|
2008-10-06 18:35:41 +02:00
|
|
|
/**
 * Frees a tag object: returns all items to the tag pool and releases
 * the item array and the tag struct itself.  If the tag is still
 * using the static bulk list (tag_end_add() was never called), the
 * bulk list is released instead of freed.
 */
void tag_free(struct tag *tag)
{
	int i;

	assert(tag != NULL);

	/* return all items to the shared pool under the pool lock */
	g_mutex_lock(tag_pool_lock);
	for (i = tag->num_items; --i >= 0; )
		tag_pool_put_item(tag->items[i]);
	g_mutex_unlock(tag_pool_lock);

	if (tag->items == bulk.items) {
		/* this tag owned the static bulk list; mark it
		   available again instead of freeing it */
#ifndef NDEBUG
		assert(bulk.busy);
		bulk.busy = false;
#endif
	} else
		g_free(tag->items);

	g_free(tag);
}
|
|
|
|
|
2008-08-29 14:48:39 +02:00
|
|
|
/**
 * Creates a deep copy of a tag.  Item objects themselves are not
 * duplicated; their reference counts in the shared tag pool are
 * incremented instead.  Returns NULL if the input is NULL.
 */
struct tag *tag_dup(const struct tag *tag)
{
	struct tag *ret;

	if (!tag)
		return NULL;

	ret = tag_new();
	ret->time = tag->time;
	ret->num_items = tag->num_items;
	/* allocate the pointer array in one go at its final size */
	ret->items = ret->num_items > 0 ? g_malloc(items_size(tag)) : NULL;

	/* take a new pool reference for each item */
	g_mutex_lock(tag_pool_lock);
	for (unsigned i = 0; i < tag->num_items; i++)
		ret->items[i] = tag_pool_dup_item(tag->items[i]);
	g_mutex_unlock(tag_pool_lock);

	return ret;
}
|
|
|
|
|
2009-01-03 23:28:51 +01:00
|
|
|
/**
 * Merges two tags into a newly allocated one.  Items from "add" take
 * precedence: an item type present in "add" suppresses all items of
 * that type from "base".  The duration comes from "add" when it is
 * positive, from "base" otherwise.  The caller owns the result.
 */
struct tag *
tag_merge(const struct tag *base, const struct tag *add)
{
	struct tag *ret;
	unsigned n;

	assert(base != NULL);
	assert(add != NULL);

	/* allocate new tag object */

	ret = tag_new();
	ret->time = add->time > 0 ? add->time : base->time;
	/* worst-case size: every item from both tags; shrunk below if
	   some "base" items are suppressed */
	ret->num_items = base->num_items + add->num_items;
	ret->items = ret->num_items > 0 ? g_malloc(items_size(ret)) : NULL;

	g_mutex_lock(tag_pool_lock);

	/* copy all items from "add" */

	for (unsigned i = 0; i < add->num_items; ++i)
		ret->items[i] = tag_pool_dup_item(add->items[i]);

	n = add->num_items;

	/* copy additional items from "base" */

	for (unsigned i = 0; i < base->num_items; ++i)
		if (!tag_has_type(add, base->items[i]->type))
			ret->items[n++] = tag_pool_dup_item(base->items[i]);

	g_mutex_unlock(tag_pool_lock);

	assert(n <= ret->num_items);

	if (n < ret->num_items) {
		/* some tags were not copied - shrink ret->items */
		assert(n > 0);

		ret->num_items = n;
		ret->items = g_realloc(ret->items, items_size(ret));
	}

	return ret;
}
|
|
|
|
|
2009-01-15 00:21:08 +01:00
|
|
|
const char *
|
|
|
|
tag_get_value(const struct tag *tag, enum tag_type type)
|
2008-11-03 18:24:00 +01:00
|
|
|
{
|
|
|
|
assert(tag != NULL);
|
|
|
|
assert(type < TAG_NUM_OF_ITEM_TYPES);
|
|
|
|
|
2009-02-27 09:01:55 +01:00
|
|
|
for (unsigned i = 0; i < tag->num_items; i++)
|
2008-11-03 18:24:00 +01:00
|
|
|
if (tag->items[i]->type == type)
|
2009-01-15 00:21:08 +01:00
|
|
|
return tag->items[i]->value;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2008-11-03 18:24:00 +01:00
|
|
|
|
2009-01-15 00:21:08 +01:00
|
|
|
bool tag_has_type(const struct tag *tag, enum tag_type type)
|
|
|
|
{
|
|
|
|
return tag_get_value(tag, type) != NULL;
|
2008-11-03 18:24:00 +01:00
|
|
|
}
|
|
|
|
|
2009-02-27 08:06:59 +01:00
|
|
|
bool tag_equal(const struct tag *tag1, const struct tag *tag2)
|
2006-07-20 18:02:40 +02:00
|
|
|
{
|
|
|
|
if (tag1 == NULL && tag2 == NULL)
|
2009-02-27 08:06:59 +01:00
|
|
|
return true;
|
2006-07-20 18:02:40 +02:00
|
|
|
else if (!tag1 || !tag2)
|
2009-02-27 08:06:59 +01:00
|
|
|
return false;
|
2004-06-01 12:28:06 +02:00
|
|
|
|
2006-07-20 18:02:40 +02:00
|
|
|
if (tag1->time != tag2->time)
|
2009-02-27 08:06:59 +01:00
|
|
|
return false;
|
2004-06-01 12:28:06 +02:00
|
|
|
|
2009-02-27 09:01:55 +01:00
|
|
|
if (tag1->num_items != tag2->num_items)
|
2009-02-27 08:06:59 +01:00
|
|
|
return false;
|
2004-11-10 22:58:27 +01:00
|
|
|
|
2009-02-27 09:02:13 +01:00
|
|
|
for (unsigned i = 0; i < tag1->num_items; i++) {
|
2008-08-29 09:38:29 +02:00
|
|
|
if (tag1->items[i]->type != tag2->items[i]->type)
|
2009-02-27 08:06:59 +01:00
|
|
|
return false;
|
2008-08-29 09:38:29 +02:00
|
|
|
if (strcmp(tag1->items[i]->value, tag2->items[i]->value)) {
|
2009-02-27 08:06:59 +01:00
|
|
|
return false;
|
2004-11-10 22:58:27 +01:00
|
|
|
}
|
|
|
|
}
|
2004-06-01 12:28:06 +02:00
|
|
|
|
2009-02-27 08:06:59 +01:00
|
|
|
return true;
|
2004-06-01 12:28:06 +02:00
|
|
|
}
|
2004-11-10 22:58:27 +01:00
|
|
|
|
2009-02-27 09:02:32 +01:00
|
|
|
/**
 * Replace invalid sequences with the question mark.
 *
 * @param src the original (invalid) string
 * @param length the number of bytes in src to consider
 * @param end points at the first invalid byte within src, as
 * reported by a previous g_utf8_validate() call
 * @return a newly allocated copy of src with every invalid byte
 * replaced by '?'; the caller must free it
 */
static char *
patch_utf8(const char *src, size_t length, const gchar *end)
{
	/* duplicate the string, and replace invalid bytes in that
	   buffer */
	char *dest = g_strdup(src);

	do {
		/* overwrite the offending byte in the copy, then
		   re-validate the remainder to find the next one */
		dest[end - src] = '?';
	} while (!g_utf8_validate(end + 1, (src + length) - (end + 1), &end));

	return dest;
}
|
|
|
|
|
2008-11-01 14:04:15 +01:00
|
|
|
/**
 * Makes sure a string is valid UTF-8.
 *
 * @param str the input string (need not be NUL-terminated within
 * "length")
 * @param length the number of bytes to examine
 * @return NULL if str is already valid UTF-8; otherwise a newly
 * allocated repaired copy which the caller must free
 */
static char *
fix_utf8(const char *str, size_t length)
{
	const gchar *end;
	char *temp;
	gsize written;

	assert(str != NULL);

	/* check if the string is already valid UTF-8 */
	if (g_utf8_validate(str, length, &end))
		return NULL;

	/* no, it's not - try to import it from ISO-Latin-1 */
	temp = g_convert(str, length, "utf-8", "iso-8859-1",
			 NULL, &written, NULL);
	if (temp != NULL)
		/* success! */
		return temp;

	/* no, still broken - there's no medication, just patch
	   invalid sequences */
	return patch_utf8(str, length, end);
}
|
|
|
|
|
tag: try not to reallocate tag.items in every add() call
If many tag_items are added at once while the tag cache is being
loaded, manage these items in a static fixed list, instead of
reallocating the list with every newly created item. This reduces
heap fragmentation.
Massif results again:
mk before: total 12,837,632; useful 10,626,383; extra 2,211,249
mk now: total 12,736,720; useful 10,626,383; extra 2,110,337
The "useful" value is the same since this patch only changes the way
we allocate the same amount of memory, but heap fragmentation was
reduced by 5%.
2008-08-29 09:39:08 +02:00
|
|
|
/**
 * Starts bulk mode for an empty tag: the tag borrows the static bulk
 * list as its item array, so tag_add_item_n() can append items
 * without reallocating for every single one.  Must be paired with
 * tag_end_add().  Only one tag may be in bulk mode at a time.
 */
void tag_begin_add(struct tag *tag)
{
	assert(!bulk.busy);
	assert(tag != NULL);
	assert(tag->items == NULL);
	assert(tag->num_items == 0);

#ifndef NDEBUG
	bulk.busy = true;
#endif
	/* point the tag at the shared staging buffer */
	tag->items = bulk.items;
}
|
|
|
|
|
|
|
|
/**
 * Finishes bulk mode: copies the collected items from the static
 * bulk list into a freshly allocated array of exactly the right
 * size, and releases the bulk list for the next user.
 */
void tag_end_add(struct tag *tag)
{
	if (tag->items == bulk.items) {
		assert(tag->num_items <= BULK_MAX);

		if (tag->num_items > 0) {
			/* copy the tag items from the bulk list over
			   to a new list (which fits exactly) */
			tag->items = g_malloc(items_size(tag));
			memcpy(tag->items, bulk.items, items_size(tag));
		} else
			tag->items = NULL;
	}

#ifndef NDEBUG
	bulk.busy = false;
#endif
}
|
|
|
|
|
2009-01-03 23:13:39 +01:00
|
|
|
/**
 * Is this byte an ASCII control character (i.e. below 0x20, not
 * printable text)?
 */
static bool
char_is_non_printable(unsigned char ch)
{
	return ch <= 0x1f;
}
|
|
|
|
|
|
|
|
/**
 * Scans the buffer for the first non-printable character.  Returns a
 * pointer to it, or NULL if all "length" bytes are printable.
 */
static const char *
find_non_printable(const char *p, size_t length)
{
	const char *const end = p + length;

	for (const char *q = p; q != end; ++q)
		if (char_is_non_printable(*q))
			return q;

	return NULL;
}
|
|
|
|
|
|
|
|
/**
 * Clears all non-printable characters, convert them to space.
 * Returns NULL if nothing needs to be cleared.
 *
 * The caller must free the returned (newly allocated) string.
 */
static char *
clear_non_printable(const char *p, size_t length)
{
	const char *first = find_non_printable(p, length);
	char *dest;

	if (first == NULL)
		/* fast path: no copy needed */
		return NULL;

	dest = g_strndup(p, length);

	/* start at the first offending byte; everything before it is
	   known to be printable */
	for (size_t i = first - p; i < length; ++i)
		if (char_is_non_printable(dest[i]))
			dest[i] = ' ';

	return dest;
}
|
|
|
|
|
2009-01-03 14:52:49 +01:00
|
|
|
/**
 * Sanitizes a raw tag value: repairs invalid UTF-8 and replaces
 * non-printable characters with spaces.
 *
 * @return NULL if the input needed no fixing; otherwise a newly
 * allocated sanitized string which the caller must free
 */
static char *
fix_tag_value(const char *p, size_t length)
{
	char *utf8, *cleared;

	utf8 = fix_utf8(p, length);
	if (utf8 != NULL) {
		/* continue working on the repaired copy */
		p = utf8;
		length = strlen(p);
	}

	cleared = clear_non_printable(p, length);
	if (cleared == NULL)
		/* nothing to clear; pass ownership of the UTF-8 copy
		   (which may itself be NULL) to the caller */
		cleared = utf8;
	else
		/* the cleared copy supersedes the intermediate one */
		g_free(utf8);

	return cleared;
}
|
|
|
|
|
2009-02-27 09:01:55 +01:00
|
|
|
/**
 * Appends one item to the tag: sanitizes the value, grows the item
 * array (honoring bulk mode), and obtains the item from the shared
 * tag pool.
 *
 * @param value the raw value; a sanitized temporary copy is used if
 * needed
 * @param len the number of bytes in value
 */
static void
tag_add_item_internal(struct tag *tag, enum tag_type type,
		      const char *value, size_t len)
{
	/* index of the slot the new item will occupy */
	unsigned int i = tag->num_items;
	char *p;

	p = fix_tag_value(value, len);
	if (p != NULL) {
		/* use the sanitized copy instead of the original */
		value = p;
		len = strlen(value);
	}

	tag->num_items++;

	if (tag->items != bulk.items)
		/* bulk mode disabled */
		tag->items = g_realloc(tag->items, items_size(tag));
	else if (tag->num_items >= BULK_MAX) {
		/* bulk list already full - switch back to non-bulk */
		assert(bulk.busy);

		tag->items = g_malloc(items_size(tag));
		/* copy the previously staged items (all but the new,
		   not-yet-written slot) out of the bulk list */
		memcpy(tag->items, bulk.items,
		       items_size(tag) - sizeof(struct tag_item *));
	}

	g_mutex_lock(tag_pool_lock);
	tag->items[i] = tag_pool_get_item(type, value, len);
	g_mutex_unlock(tag_pool_lock);

	/* free the sanitized copy (no-op when p is NULL) */
	g_free(p);
}
|
|
|
|
|
2009-03-01 00:52:02 +01:00
|
|
|
void tag_add_item_n(struct tag *tag, enum tag_type type,
|
2008-08-29 09:38:21 +02:00
|
|
|
const char *value, size_t len)
|
2006-07-20 18:02:40 +02:00
|
|
|
{
|
2009-03-01 00:52:02 +01:00
|
|
|
if (ignore_tag_items[type])
|
2007-11-21 13:15:00 +01:00
|
|
|
{
|
2006-07-20 18:02:40 +02:00
|
|
|
return;
|
2007-11-21 13:15:00 +01:00
|
|
|
}
|
2006-07-20 18:02:40 +02:00
|
|
|
if (!value || !len)
|
|
|
|
return;
|
2004-11-10 23:13:30 +01:00
|
|
|
|
2009-03-01 00:52:02 +01:00
|
|
|
tag_add_item_internal(tag, type, value, len);
|
2004-11-10 22:58:27 +01:00
|
|
|
}
|