Drop support for SUBNETS_ACCESSED and CLIENT_ACCESSES commands

Support for the SUBNETS_ACCESSED and CLIENT_ACCESSES commands was
enabled in chronyd, but in chronyc it was always disabled and the
CLIENT_ACCESSES_BY_INDEX command was used instead. As there is no plan
to enable it in the future, remove the support completely.
This commit is contained in:
Miroslav Lichvar 2013-07-31 15:03:27 +02:00
parent c6fdeeb6bb
commit ad58baa13b
4 changed files with 6 additions and 378 deletions

29
candm.h
View file

@ -298,22 +298,10 @@ typedef struct {
uint32_t bits_specd;
} REQ_SubnetsAccessed_Subnet;
/* Maximum number of subnets that one SUBNETS_ACCESSED request may query */
#define MAX_SUBNETS_ACCESSED 8
/* Request body of the SUBNETS_ACCESSED command */
typedef struct {
uint32_t n_subnets; /* number of valid entries in subnets[] */
REQ_SubnetsAccessed_Subnet subnets[MAX_SUBNETS_ACCESSED];
} REQ_SubnetsAccessed;
/* This limit is based on the response size rather than the
   request size */
#define MAX_CLIENT_ACCESSES 8
/* Request body of the CLIENT_ACCESSES command */
typedef struct {
uint32_t n_clients; /* number of valid entries in client_ips[] */
IPAddr client_ips[MAX_CLIENT_ACCESSES];
} REQ_ClientAccesses;
typedef struct {
uint32_t first_index;
uint32_t n_indices;
@ -370,7 +358,8 @@ typedef struct {
modify polltarget, modify maxdelaydevratio, reselect, reselectdistance
Version 5 : auth data moved to the end of the packet to allow hashes with
different sizes, extended sources, tracking and activity reports
different sizes, extended sources, tracking and activity reports, dropped
subnets accessed and client accesses
*/
#define PROTO_VERSION_NUMBER 5
@ -426,8 +415,6 @@ typedef struct {
REQ_RTCReport rtcreport;
REQ_TrimRTC trimrtc;
REQ_CycleLogs cyclelogs;
REQ_SubnetsAccessed subnets_accessed;
REQ_ClientAccesses client_accesses;
REQ_ClientAccessesByIndex client_accesses_by_index;
REQ_ManualList manual_list;
REQ_ManualDelete manual_delete;
@ -582,11 +569,6 @@ typedef struct {
uint32_t bitmap[8];
} RPY_SubnetsAccessed_Subnet;
/* Reply body of the SUBNETS_ACCESSED command */
typedef struct {
uint32_t n_subnets; /* number of valid entries in subnets[] */
RPY_SubnetsAccessed_Subnet subnets[MAX_SUBNETS_ACCESSED];
} RPY_SubnetsAccessed;
typedef struct {
IPAddr ip;
uint32_t client_hits;
@ -598,11 +580,6 @@ typedef struct {
uint32_t last_cmd_hit_ago;
} RPY_ClientAccesses_Client;
/* Reply body of the CLIENT_ACCESSES command */
typedef struct {
uint32_t n_clients; /* number of valid entries in clients[] */
RPY_ClientAccesses_Client clients[MAX_CLIENT_ACCESSES];
} RPY_ClientAccesses;
typedef struct {
uint32_t n_indices; /* how many indices there are in the server's table */
uint32_t next_index; /* the index 1 beyond those processed on this call */
@ -656,8 +633,6 @@ typedef struct {
RPY_Tracking tracking;
RPY_Sourcestats sourcestats;
RPY_Rtc rtc;
RPY_SubnetsAccessed subnets_accessed;
RPY_ClientAccesses client_accesses;
RPY_ClientAccessesByIndex client_accesses_by_index;
RPY_ManualList manual_list;
RPY_Activity activity;

201
client.c
View file

@ -1991,207 +1991,6 @@ process_cmd_rtcreport(char *line)
/* ================================================== */
#if 0
/* This is a previous attempt at implementing the clients command.  It
   could be re-instated sometime as a way of looking at all clients in a
   particular subnet.  The problem with it is that it requires at least 5
   round trips to the server even if the server only has one client to
   report. */

/* Node in the work list of subnets to expand; once a node reaches a
   32-bit prefix it represents a single client address. */
typedef struct XSubnetToDo {
  struct XSubnetToDo *next;
  unsigned long ip;
  unsigned long bits;
} SubnetToDo;

/* Walk the server's client log.  Phase 1 expands subnets 8 prefix bits
   at a time via REQ_SUBNETS_ACCESSED, starting from the root subnet
   0.0.0.0/0, appending each populated child subnet to the work list.
   Phase 2 queries the resulting single-client entries with
   REQ_CLIENT_ACCESSES and prints one statistics row per client.
   All list nodes are freed on every exit path. */
static void
process_cmd_clients(char *line)
{
  CMD_Request request;
  CMD_Reply reply;
  SubnetToDo *head, *todo, *tail, *p, *next_node, *new_node;
  int i, j, nets_looked_up, clients_looked_up;
  int word;
  unsigned long mask;
  unsigned long ip, bits;
  unsigned long client_hits;
  unsigned long peer_hits;
  unsigned long cmd_hits_auth;
  unsigned long cmd_hits_normal;
  unsigned long cmd_hits_bad;
  unsigned long last_ntp_hit_ago;
  unsigned long last_cmd_hit_ago;
  char hostname_buf[50];
  int n_replies;

  head = todo = MallocNew(SubnetToDo);
  todo->next = NULL;
  /* Set up initial query = root subnet */
  todo->ip = 0;
  todo->bits = 0;
  tail = todo;

  do {
    request.command = htons(REQ_SUBNETS_ACCESSED);

    /* Build list of subnets to examine */
    i = 0;
    p = todo;
    while ((i < MAX_SUBNETS_ACCESSED) && p && (p->bits < 32)) {
      request.data.subnets_accessed.subnets[i].ip = htonl(p->ip);
      request.data.subnets_accessed.subnets[i].bits_specd = htonl(p->bits);
      p = p->next;
      i++;
    }

    nets_looked_up = i;
    if (nets_looked_up == 0) {
      /* No subnets need examining */
      break;
    }

    request.data.subnets_accessed.n_subnets = htonl(nets_looked_up);

    if (request_reply(&request, &reply, RPY_SUBNETS_ACCESSED, 0)) {
      n_replies = ntohl(reply.data.subnets_accessed.n_subnets);
      for (j = 0; j < n_replies; j++) {
        ip = ntohl(reply.data.subnets_accessed.subnets[j].ip);
        bits = ntohl(reply.data.subnets_accessed.subnets[j].bits_specd);
        for (i = 0; i < 256; i++) {
          word = i / 32;
          mask = 1UL << (i % 32);
          if (ntohl(reply.data.subnets_accessed.subnets[j].bitmap[word]) & mask) {
            /* Add this subnet to the todo list */
            new_node = MallocNew(SubnetToDo);
            new_node->next = NULL;
            new_node->bits = bits + 8;
            new_node->ip = ip | (i << (24 - bits));
            tail->next = new_node;
            tail = new_node;
          }
        }
      }

      /* Skip the todo pointer forwards by the number of nets looked
         up.  Can't do this earlier, because we might have to point
         at the next layer of subnets that have only just been
         concatenated to the linked list. */
      for (i = 0; i < nets_looked_up; i++) {
        todo = todo->next;
      }
    } else {
      /* BUGFIX: this used to be a bare 'return', which leaked every
         node in the list.  Free them before giving up. */
      goto cleanup;
    }
  } while (1); /* keep going until all subnets have been expanded,
                  down to single nodes */

  /* Now the todo list consists of client records */
  request.command = htons(REQ_CLIENT_ACCESSES);

  printf("Hostname Client Peer CmdAuth CmdNorm CmdBad LstN LstC\n"
"========================= ====== ====== ====== ====== ====== ==== ====\n");

  do {
    i = 0;
    p = todo;
    while ((i < MAX_CLIENT_ACCESSES) && p) {
      request.data.client_accesses.client_ips[i] = htonl(p->ip);
      p = p->next;
      i++;
    }

    clients_looked_up = i;
    if (clients_looked_up == 0) {
      /* No more clients to do */
      break;
    }

    request.data.client_accesses.n_clients = htonl(clients_looked_up);

    if (request_reply(&request, &reply, RPY_CLIENT_ACCESSES, 0)) {
      n_replies = ntohl(reply.data.client_accesses.n_clients);
      for (j = 0; j < n_replies; j++) {
        ip = ntohl(reply.data.client_accesses.clients[j].ip);
        if (ip != 0UL) {
          /* ip == 0 implies that the node could not be found in
             the daemon's tables; we shouldn't ever generate this
             case, but ignore it if we do.  (In future there might
             be a protocol to reset the client logging; if another
             administrator runs that while we're doing the clients
             command, there will be a race condition that could
             cause this). */
          client_hits = ntohl(reply.data.client_accesses.clients[j].client_hits);
          peer_hits = ntohl(reply.data.client_accesses.clients[j].peer_hits);
          cmd_hits_auth = ntohl(reply.data.client_accesses.clients[j].cmd_hits_auth);
          cmd_hits_normal = ntohl(reply.data.client_accesses.clients[j].cmd_hits_normal);
          cmd_hits_bad = ntohl(reply.data.client_accesses.clients[j].cmd_hits_bad);
          last_ntp_hit_ago = ntohl(reply.data.client_accesses.clients[j].last_ntp_hit_ago);
          last_cmd_hit_ago = ntohl(reply.data.client_accesses.clients[j].last_cmd_hit_ago);

          if (no_dns) {
            snprintf(hostname_buf, sizeof(hostname_buf),
                     "%s", UTI_IPToDottedQuad(ip));
          } else {
            DNS_IPAddress2Name(ip, hostname_buf, sizeof(hostname_buf));
            hostname_buf[25] = 0;
          }
          /* BUGFIX: counters are unsigned long, so %lu, not %d */
          printf("%-25s %6lu %6lu %6lu %6lu %6lu ",
                 hostname_buf,
                 client_hits, peer_hits,
                 cmd_hits_auth, cmd_hits_normal, cmd_hits_bad);
          print_seconds(last_ntp_hit_ago);
          printf(" ");
          print_seconds(last_cmd_hit_ago);
          printf("\n");
        }
      }

      /* Skip the todo pointer forwards by the number of clients looked
         up.  Can't do this earlier, because we might have to point
         at entries that have only just been concatenated to the
         linked list. */
      for (i = 0; i < clients_looked_up; i++) {
        todo = todo->next;
      }
    } else {
      /* BUGFIX: on a failed request this used to fall straight back
         into the loop without advancing todo, spinning forever.  Bail
         out and free the list instead. */
      goto cleanup;
    }
  } while (1);

cleanup:
  /* Release every node, whether processed or not */
  for (p = head; p; ) {
    next_node = p->next;
    Free(p);
    p = next_node;
  }
}
#endif
/* New implementation of clients command */
static int
process_cmd_clients(char *line)
{

112
cmdmon.c
View file

@ -1479,110 +1479,6 @@ handle_cyclelogs(CMD_Request *rx_message, CMD_Reply *tx_message)
/* ================================================== */
#define FLIPL(X) ((X) = htonl(X))
/* Handler for the SUBNETS_ACCESSED command: echo each queried subnet
   back in the reply together with its access bitmap from the client
   log.  On a bad subnet or inactive logging, report the error status
   with an empty subnet list. */
static void
handle_subnets_accessed(CMD_Request *rx_message, CMD_Reply *tx_message)
{
  unsigned long n_subnets, prefix_bits;
  int subnet_index, word_index;
  IPAddr subnet_addr;
  CLG_Status status;

  n_subnets = ntohl(rx_message->data.subnets_accessed.n_subnets);

  tx_message->status = htons(STT_SUCCESS);
  tx_message->reply = htons(RPY_SUBNETS_ACCESSED);
  tx_message->data.subnets_accessed.n_subnets = htonl(n_subnets);

  for (subnet_index = 0; subnet_index < n_subnets; subnet_index++) {
    UTI_IPNetworkToHost(&rx_message->data.subnets_accessed.subnets[subnet_index].ip,
                        &subnet_addr);
    prefix_bits = ntohl(rx_message->data.subnets_accessed.subnets[subnet_index].bits_specd);

    /* Copy the queried subnet into the corresponding reply slot */
    UTI_IPHostToNetwork(&subnet_addr,
                        &tx_message->data.subnets_accessed.subnets[subnet_index].ip);
    tx_message->data.subnets_accessed.subnets[subnet_index].bits_specd = htonl(prefix_bits);

    status = CLG_GetSubnetBitmap(&subnet_addr, prefix_bits,
                                 tx_message->data.subnets_accessed.subnets[subnet_index].bitmap);

    if (status == CLG_SUCCESS || status == CLG_EMPTYSUBNET) {
      /* Flip endianness of each 4 byte word.  Don't care if subnet
         is empty - just return an all-zero bitmap. */
      for (word_index = 0; word_index < 8; word_index++) {
        FLIPL(tx_message->data.subnets_accessed.subnets[subnet_index].bitmap[word_index]);
      }
    } else if (status == CLG_BADSUBNET) {
      tx_message->status = htons(STT_BADSUBNET);
      tx_message->data.subnets_accessed.n_subnets = htonl(0);
      return;
    } else if (status == CLG_INACTIVE) {
      tx_message->status = htons(STT_INACTIVE);
      tx_message->data.subnets_accessed.n_subnets = htonl(0);
      return;
    } else {
      assert(0);
    }
  }
}
/* ================================================== */
/* Handler for the CLIENT_ACCESSES command: look up the access report
   for each client IP address in the request and return its counters in
   the reply.  An unknown address is signalled back with an UNSPEC
   family; inactive logging aborts the whole reply with STT_INACTIVE.

   n_clients has already been validated against MAX_CLIENT_ACCESSES by
   the packet-length check before dispatch.

   BUGFIX: removed two leftover debug printf() calls that wrote a line
   to the daemon's stdout for every request processed. */
static void
handle_client_accesses(CMD_Request *rx_message, CMD_Reply *tx_message)
{
  CLG_Status result;
  RPT_ClientAccess_Report report;
  unsigned long nc;
  IPAddr ip;
  int i;
  struct timeval now;

  LCL_ReadCookedTime(&now, NULL);

  nc = ntohl(rx_message->data.client_accesses.n_clients);
  tx_message->status = htons(STT_SUCCESS);
  tx_message->reply = htons(RPY_CLIENT_ACCESSES);
  tx_message->data.client_accesses.n_clients = htonl(nc);

  for (i = 0; i < nc; i++) {
    UTI_IPNetworkToHost(&rx_message->data.client_accesses.client_ips[i], &ip);
    UTI_IPHostToNetwork(&ip, &tx_message->data.client_accesses.clients[i].ip);

    result = CLG_GetClientAccessReportByIP(&ip, &report, now.tv_sec);
    switch (result) {
      case CLG_SUCCESS:
        tx_message->data.client_accesses.clients[i].client_hits = htonl(report.client_hits);
        tx_message->data.client_accesses.clients[i].peer_hits = htonl(report.peer_hits);
        tx_message->data.client_accesses.clients[i].cmd_hits_auth = htonl(report.cmd_hits_auth);
        tx_message->data.client_accesses.clients[i].cmd_hits_normal = htonl(report.cmd_hits_normal);
        tx_message->data.client_accesses.clients[i].cmd_hits_bad = htonl(report.cmd_hits_bad);
        tx_message->data.client_accesses.clients[i].last_ntp_hit_ago = htonl(report.last_ntp_hit_ago);
        tx_message->data.client_accesses.clients[i].last_cmd_hit_ago = htonl(report.last_cmd_hit_ago);
        break;
      case CLG_EMPTYSUBNET:
        /* Signal back to the client that this single client address
           was unknown */
        ip.family = IPADDR_UNSPEC;
        UTI_IPHostToNetwork(&ip, &tx_message->data.client_accesses.clients[i].ip);
        break;
      case CLG_INACTIVE:
        tx_message->status = htons(STT_INACTIVE);
        tx_message->data.client_accesses.n_clients = htonl(0);
        return;
      default:
        assert(0);
        break;
    }
  }
}
/* ================================================== */
static void
handle_client_accesses_by_index(CMD_Request *rx_message, CMD_Reply *tx_message)
{
@ -2243,14 +2139,6 @@ read_from_cmd_socket(void *anything)
handle_cyclelogs(&rx_message, &tx_message);
break;
case REQ_SUBNETS_ACCESSED:
handle_subnets_accessed(&rx_message, &tx_message);
break;
case REQ_CLIENT_ACCESSES:
handle_client_accesses(&rx_message, &tx_message);
break;
case REQ_CLIENT_ACCESSES_BY_INDEX:
handle_client_accesses_by_index(&rx_message, &tx_message);
break;

View file

@ -124,23 +124,9 @@ PKL_CommandLength(CMD_Request *r)
case REQ_CYCLELOGS :
return offsetof(CMD_Request, data.cyclelogs.EOR);
case REQ_SUBNETS_ACCESSED :
{
unsigned long ns;
ns = ntohl(r->data.subnets_accessed.n_subnets);
if (ns > MAX_SUBNETS_ACCESSED)
return 0;
return (offsetof(CMD_Request, data.subnets_accessed.subnets) +
ns * sizeof(REQ_SubnetsAccessed_Subnet));
}
case REQ_CLIENT_ACCESSES:
{
unsigned long nc;
nc = ntohl(r->data.client_accesses.n_clients);
if (nc > MAX_CLIENT_ACCESSES)
return 0;
return (offsetof(CMD_Request, data.client_accesses.client_ips) +
nc * sizeof(unsigned long));
}
/* No longer supported */
return 0;
case REQ_CLIENT_ACCESSES_BY_INDEX:
return offsetof(CMD_Request, data.client_accesses_by_index.EOR);
case REQ_MANUAL_LIST:
@ -198,29 +184,9 @@ PKL_ReplyLength(CMD_Reply *r)
case RPY_RTC:
return offsetof(CMD_Reply, data.rtc.EOR);
case RPY_SUBNETS_ACCESSED :
{
unsigned long ns = ntohl(r->data.subnets_accessed.n_subnets);
if (r->status == htons(STT_SUCCESS)) {
if (ns > MAX_SUBNETS_ACCESSED)
return 0;
return (offsetof(CMD_Reply, data.subnets_accessed.subnets) +
ns * sizeof(RPY_SubnetsAccessed_Subnet));
} else {
return offsetof(CMD_Reply, data);
}
}
case RPY_CLIENT_ACCESSES:
{
unsigned long nc = ntohl(r->data.client_accesses.n_clients);
if (r->status == htons(STT_SUCCESS)) {
if (nc > MAX_CLIENT_ACCESSES)
return 0;
return (offsetof(CMD_Reply, data.client_accesses.clients) +
nc * sizeof(RPY_ClientAccesses_Client));
} else {
return offsetof(CMD_Reply, data);
}
}
/* No longer supported */
return 0;
case RPY_CLIENT_ACCESSES_BY_INDEX:
{
unsigned long nc = ntohl(r->data.client_accesses_by_index.n_clients);