/*
 * Copyright (C) 2003-2013 The Music Player Daemon Project
 * http://www.musicpd.org
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "config.h"
#include "Tag.hxx"
#include "TagInternal.hxx"
#include "TagPool.hxx"

#include <glib.h>

#include <assert.h>
#include <string.h>

/**
 * Maximum number of items managed in the bulk list; if it is
 * exceeded, we switch back to "normal" reallocation.
 */
#define BULK_MAX 64

static struct {
#ifndef NDEBUG
	bool busy;
#endif
	TagItem *items[BULK_MAX];
} bulk;
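
/*
 * Note: bulk.busy exists only in debug builds; it asserts that at
 * most one Tag uses the shared bulk.items array at a time (see
 * BeginAdd() and EndAdd() below).
 */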

bool ignore_tag_items[TAG_NUM_OF_ITEM_TYPES];

enum tag_type
tag_name_parse(const char *name)
{
	assert(name != nullptr);

	for (unsigned i = 0; i < TAG_NUM_OF_ITEM_TYPES; ++i) {
		assert(tag_item_names[i] != nullptr);

		if (strcmp(name, tag_item_names[i]) == 0)
			return (enum tag_type)i;
	}

	return TAG_NUM_OF_ITEM_TYPES;
}

enum tag_type
tag_name_parse_i(const char *name)
{
	assert(name != nullptr);

	for (unsigned i = 0; i < TAG_NUM_OF_ITEM_TYPES; ++i) {
		assert(tag_item_names[i] != nullptr);

		if (g_ascii_strcasecmp(name, tag_item_names[i]) == 0)
			return (enum tag_type)i;
	}

	return TAG_NUM_OF_ITEM_TYPES;
}
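
/*
 * Usage sketch (hypothetical caller; the concrete strings in
 * tag_item_names are defined elsewhere, "Artist" is assumed here):
 *
 *   enum tag_type t = tag_name_parse("Artist");    // exact match
 *   enum tag_type u = tag_name_parse_i("ARTIST");  // case-insensitive
 *   if (t == TAG_NUM_OF_ITEM_TYPES)
 *           ... unknown tag name ...
 */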

static size_t
items_size(const Tag &tag)
{
	return tag.num_items * sizeof(TagItem *);
}

void tag_lib_init(void)
{
	/* ignore comments by default */
	ignore_tag_items[TAG_COMMENT] = true;
}

void
Tag::Clear()
{
	time = -1;
	has_playlist = false;

	tag_pool_lock.lock();
	for (unsigned i = 0; i < num_items; ++i)
		tag_pool_put_item(items[i]);
	tag_pool_lock.unlock();

	if (items == bulk.items) {
#ifndef NDEBUG
		assert(bulk.busy);
		bulk.busy = false;
#endif
	} else
		g_free(items);

	items = nullptr;
	num_items = 0;
}

Tag::~Tag()
{
	tag_pool_lock.lock();
	for (int i = num_items; --i >= 0; )
		tag_pool_put_item(items[i]);
	tag_pool_lock.unlock();

	if (items == bulk.items) {
#ifndef NDEBUG
		assert(bulk.busy);
		bulk.busy = false;
#endif
	} else
		g_free(items);
}

Tag::Tag(const Tag &other)
	:time(other.time), has_playlist(other.has_playlist),
	 items(nullptr),
	 num_items(other.num_items)
{
	if (num_items > 0) {
		items = (TagItem **)g_malloc(items_size(other));

		tag_pool_lock.lock();
		for (unsigned i = 0; i < num_items; i++)
			items[i] = tag_pool_dup_item(other.items[i]);
		tag_pool_lock.unlock();
	}
}
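
/*
 * Note: the copy constructor does not duplicate the item strings;
 * tag_pool_dup_item() (see TagPool.hxx) shares them via the global
 * tag pool, which is why the pool lock must be held.
 */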

Tag *
Tag::Merge(const Tag &base, const Tag &add)
{
	unsigned n;

	/* allocate new tag object */

	Tag *ret = new Tag();
	ret->time = add.time > 0 ? add.time : base.time;
	ret->num_items = base.num_items + add.num_items;
	ret->items = ret->num_items > 0
		? (TagItem **)g_malloc(items_size(*ret))
		: nullptr;

	tag_pool_lock.lock();

	/* copy all items from "add" */

	for (unsigned i = 0; i < add.num_items; ++i)
		ret->items[i] = tag_pool_dup_item(add.items[i]);

	n = add.num_items;

	/* copy additional items from "base" */

	for (unsigned i = 0; i < base.num_items; ++i)
		if (!add.HasType(base.items[i]->type))
			ret->items[n++] = tag_pool_dup_item(base.items[i]);

	tag_pool_lock.unlock();

	assert(n <= ret->num_items);

	if (n < ret->num_items) {
		/* some tags were not copied - shrink ret->items */
		assert(n > 0);

		ret->num_items = n;
		ret->items = (TagItem **)
			g_realloc(ret->items, items_size(*ret));
	}

	return ret;
}
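
/*
 * Merge() semantics, as a sketch: given base = {ARTIST "a", TITLE "t"}
 * and add = {TITLE "t2"}, the result is {TITLE "t2", ARTIST "a"} -
 * every item from "add" is kept, and an item from "base" is copied
 * only if "add" has no item of that type.
 */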

Tag *
Tag::MergeReplace(Tag *base, Tag *add)
{
	if (add == nullptr)
		return base;

	if (base == nullptr)
		return add;

	Tag *tag = Merge(*base, *add);
	delete base;
	delete add;

	return tag;
}
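
/*
 * Ownership note: when a merge actually happens, MergeReplace()
 * consumes both arguments - base and add are deleted, and only the
 * returned tag remains valid.  A sketch of a typical call:
 *
 *   tag = Tag::MergeReplace(tag, new_tag);
 */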

const char *
Tag::GetValue(tag_type type) const
{
	assert(type < TAG_NUM_OF_ITEM_TYPES);

	for (unsigned i = 0; i < num_items; i++)
		if (items[i]->type == type)
			return items[i]->value;

	return nullptr;
}
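
/*
 * Note: GetValue() returns only the first matching item; a tag may
 * contain several items of the same type, since AddItem() does not
 * deduplicate.
 */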

bool
Tag::HasType(tag_type type) const
{
	return GetValue(type) != nullptr;
}

/**
 * Replace invalid sequences with the question mark.
 */
static char *
patch_utf8(const char *src, size_t length, const gchar *end)
{
	/* duplicate the string, and replace invalid bytes in that
	   buffer */
	char *dest = g_strdup(src);

	do {
		dest[end - src] = '?';
	} while (!g_utf8_validate(end + 1, (src + length) - (end + 1), &end));

	return dest;
}
|
|
|
|
|
2008-11-01 14:04:15 +01:00
|
|
|
static char *
|
2009-01-03 14:52:49 +01:00
|
|
|
fix_utf8(const char *str, size_t length)
|
2008-11-01 14:04:15 +01:00
|
|
|
{
|
2009-02-27 09:02:32 +01:00
|
|
|
const gchar *end;
|
2008-11-01 14:04:15 +01:00
|
|
|
char *temp;
|
2008-10-31 13:57:10 +01:00
|
|
|
gsize written;
|
2008-08-29 09:38:54 +02:00
|
|
|
|
2013-01-07 10:36:27 +01:00
|
|
|
assert(str != nullptr);
|
2008-08-29 09:38:56 +02:00
|
|
|
|
2009-02-27 09:02:32 +01:00
|
|
|
/* check if the string is already valid UTF-8 */
|
|
|
|
if (g_utf8_validate(str, length, &end))
|
2013-01-07 10:36:27 +01:00
|
|
|
return nullptr;
|
2008-08-29 09:38:54 +02:00
|
|
|
|
2009-02-27 09:02:32 +01:00
|
|
|
/* no, it's not - try to import it from ISO-Latin-1 */
|
2009-01-03 14:52:49 +01:00
|
|
|
temp = g_convert(str, length, "utf-8", "iso-8859-1",
|
2013-01-07 10:36:27 +01:00
|
|
|
nullptr, &written, nullptr);
|
|
|
|
if (temp != nullptr)
|
2009-02-27 09:02:32 +01:00
|
|
|
/* success! */
|
|
|
|
return temp;
|
2008-10-31 13:57:10 +01:00
|
|
|
|
2009-02-27 09:02:32 +01:00
|
|
|
/* no, still broken - there's no medication, just patch
|
|
|
|
invalid sequences */
|
|
|
|
return patch_utf8(str, length, end);
|
2004-11-10 22:58:27 +01:00
|
|
|
}
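
/*
 * Example (sketch): the ISO-8859-1 bytes "Mot\xf6rhead" are not valid
 * UTF-8, so fix_utf8() converts them via g_convert() and returns a
 * newly allocated UTF-8 string ("Motörhead"); input that is already
 * valid UTF-8 yields nullptr, meaning "nothing to fix".
 */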
|
|
|
|
|
2013-07-30 20:11:57 +02:00
|
|
|
void
|
|
|
|
Tag::BeginAdd()
|
tag: try not to reallocate tag.items in every add() call
If many tag_items are added at once while the tag cache is being
loaded, manage these items in a static fixed list, instead of
reallocating the list with every newly created item. This reduces
heap fragmentation.
Massif results again:
mk before: total 12,837,632; useful 10,626,383; extra 2,211,249
mk now: total 12,736,720; useful 10,626,383; extra 2,110,337
The "useful" value is the same since this patch only changes the way
we allocate the same amount of memory, but heap fragmentation was
reduced by 5%.
2008-08-29 09:39:08 +02:00
|
|
|
{
|
|
|
|
assert(!bulk.busy);
|
2013-07-30 20:11:57 +02:00
|
|
|
assert(items == nullptr);
|
|
|
|
assert(num_items == 0);
|
tag: try not to reallocate tag.items in every add() call
If many tag_items are added at once while the tag cache is being
loaded, manage these items in a static fixed list, instead of
reallocating the list with every newly created item. This reduces
heap fragmentation.
Massif results again:
mk before: total 12,837,632; useful 10,626,383; extra 2,211,249
mk now: total 12,736,720; useful 10,626,383; extra 2,110,337
The "useful" value is the same since this patch only changes the way
we allocate the same amount of memory, but heap fragmentation was
reduced by 5%.
2008-08-29 09:39:08 +02:00
|
|
|
|
|
|
|
#ifndef NDEBUG
|
2009-02-27 08:06:59 +01:00
|
|
|
bulk.busy = true;
|
tag: try not to reallocate tag.items in every add() call
If many tag_items are added at once while the tag cache is being
loaded, manage these items in a static fixed list, instead of
reallocating the list with every newly created item. This reduces
heap fragmentation.
Massif results again:
mk before: total 12,837,632; useful 10,626,383; extra 2,211,249
mk now: total 12,736,720; useful 10,626,383; extra 2,110,337
The "useful" value is the same since this patch only changes the way
we allocate the same amount of memory, but heap fragmentation was
reduced by 5%.
2008-08-29 09:39:08 +02:00
|
|
|
#endif
|
2013-07-30 20:11:57 +02:00
|
|
|
items = bulk.items;
|
tag: try not to reallocate tag.items in every add() call
If many tag_items are added at once while the tag cache is being
loaded, manage these items in a static fixed list, instead of
reallocating the list with every newly created item. This reduces
heap fragmentation.
Massif results again:
mk before: total 12,837,632; useful 10,626,383; extra 2,211,249
mk now: total 12,736,720; useful 10,626,383; extra 2,110,337
The "useful" value is the same since this patch only changes the way
we allocate the same amount of memory, but heap fragmentation was
reduced by 5%.
2008-08-29 09:39:08 +02:00
|
|
|
}

void
Tag::EndAdd()
{
	if (items == bulk.items) {
		assert(num_items <= BULK_MAX);

		if (num_items > 0) {
			/* copy the tag items from the bulk list over
			   to a new list (which fits exactly) */
			items = (TagItem **)
				g_malloc(items_size(*this));
			memcpy(items, bulk.items, items_size(*this));
		} else
			items = nullptr;
	}

#ifndef NDEBUG
	bulk.busy = false;
#endif
}
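
/*
 * Bulk mode usage sketch (hypothetical caller, e.g. the tag cache
 * loader): wrapping many AddItem() calls in BeginAdd()/EndAdd() makes
 * them fill the static bulk.items array instead of calling g_realloc()
 * once per item:
 *
 *   tag->BeginAdd();
 *   tag->AddItem(TAG_ARTIST, "Foo");
 *   tag->AddItem(TAG_TITLE, "Bar");
 *   tag->EndAdd();
 */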
|
|
|
|
|
2009-01-03 23:13:39 +01:00
|
|
|
static bool
|
|
|
|
char_is_non_printable(unsigned char ch)
|
|
|
|
{
|
|
|
|
return ch < 0x20;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const char *
|
|
|
|
find_non_printable(const char *p, size_t length)
|
|
|
|
{
|
|
|
|
for (size_t i = 0; i < length; ++i)
|
|
|
|
if (char_is_non_printable(p[i]))
|
|
|
|
return p + i;
|
|
|
|
|
2013-01-07 10:36:27 +01:00
|
|
|
return nullptr;
|
2009-01-03 23:13:39 +01:00
|
|
|
}

/**
 * Replaces all non-printable characters with a space.  Returns
 * nullptr if nothing needed to be cleared.
 */
static char *
clear_non_printable(const char *p, size_t length)
{
	const char *first = find_non_printable(p, length);
	char *dest;

	if (first == nullptr)
		return nullptr;

	dest = g_strndup(p, length);

	for (size_t i = first - p; i < length; ++i)
		if (char_is_non_printable(dest[i]))
			dest[i] = ' ';

	return dest;
}
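
/*
 * Example (sketch): clear_non_printable("ab\tc", 4) copies the string
 * and replaces the control character, yielding "ab c"; for input with
 * no bytes below 0x20 it returns nullptr and no copy is made.
 */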
|
|
|
|
|
2009-01-03 14:52:49 +01:00
|
|
|
static char *
|
|
|
|
fix_tag_value(const char *p, size_t length)
|
|
|
|
{
|
2009-01-03 23:13:39 +01:00
|
|
|
char *utf8, *cleared;
|
2009-01-03 14:52:49 +01:00
|
|
|
|
|
|
|
utf8 = fix_utf8(p, length);
|
2013-01-07 10:36:27 +01:00
|
|
|
if (utf8 != nullptr) {
|
2009-01-03 23:13:39 +01:00
|
|
|
p = utf8;
|
|
|
|
length = strlen(p);
|
|
|
|
}
|
2009-01-03 14:52:49 +01:00
|
|
|
|
2009-01-03 23:13:39 +01:00
|
|
|
cleared = clear_non_printable(p, length);
|
2013-01-07 10:36:27 +01:00
|
|
|
if (cleared == nullptr)
|
2009-01-03 23:13:39 +01:00
|
|
|
cleared = utf8;
|
|
|
|
else
|
|
|
|
g_free(utf8);
|
2009-01-03 14:52:49 +01:00
|
|
|
|
2009-01-03 23:13:39 +01:00
|
|
|
return cleared;
|
2009-01-03 14:52:49 +01:00
|
|
|
}

void
Tag::AddItemInternal(tag_type type, const char *value, size_t len)
{
	unsigned int i = num_items;
	char *p;

	p = fix_tag_value(value, len);
	if (p != nullptr) {
		value = p;
		len = strlen(value);
	}

	num_items++;

	if (items != bulk.items)
		/* bulk mode disabled */
		items = (TagItem **)
			g_realloc(items, items_size(*this));
	else if (num_items >= BULK_MAX) {
		/* bulk list already full - switch back to non-bulk */
		assert(bulk.busy);

		items = (TagItem **)g_malloc(items_size(*this));
		memcpy(items, bulk.items,
		       items_size(*this) - sizeof(TagItem *));
	}

	tag_pool_lock.lock();
	items[i] = tag_pool_get_item(type, value, len);
	tag_pool_lock.unlock();

	g_free(p);
}
|
|
|
|
|
2013-07-30 20:11:57 +02:00
|
|
|
void
|
|
|
|
Tag::AddItem(tag_type type, const char *value, size_t len)
|
2006-07-20 18:02:40 +02:00
|
|
|
{
|
2009-03-01 00:52:02 +01:00
|
|
|
if (ignore_tag_items[type])
|
2006-07-20 18:02:40 +02:00
|
|
|
return;
|
2013-07-30 20:11:57 +02:00
|
|
|
|
|
|
|
if (value == nullptr || len == 0)
|
2006-07-20 18:02:40 +02:00
|
|
|
return;
|
2004-11-10 23:13:30 +01:00
|
|
|
|
2013-07-30 20:11:57 +02:00
|
|
|
AddItemInternal(type, value, len);
|
|
|
|
}

void
Tag::AddItem(tag_type type, const char *value)
{
	AddItem(type, value, strlen(value));
}