Changes in / [4b82445:738b549] in mainline

Files:
- 5 deleted
- 23 edited

Legend (line markers in the diffs below):
- ' ' unmodified
- '+' added
- '-' removed
HelenOS.config
 # USB settings
 
-% USB verbose messages
-! CONFIG_USB_VERBOSE (n/y)
+% USB release build (less logging)
+! CONFIG_USB_RELEASE_BUILD (y/n)
 
 % Start virtual USB host controller
boot/Makefile.common
     $(USPACE_PATH)/app/nettest3/nettest3 \
     $(USPACE_PATH)/app/netecho/netecho \
-    $(USPACE_PATH)/app/nterm/nterm \
     $(USPACE_PATH)/app/ping/ping \
     $(USPACE_PATH)/app/stats/stats \
kernel/arch/amd64/include/mm/page.h
  */
 /** @file
+ */
+
+/** Paging on AMD64
+ *
+ * The space is divided in positive numbers (uspace) and
+ * negative numbers (kernel). The 'negative' space starting
+ * with 0xffff800000000000 and ending with 0xffffffffffffffff
+ * is identically mapped physical memory.
+ *
  */
 
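The comment added above describes a constant-offset identity mapping of physical memory into the kernel half of the address space. Under such a mapping, physical/virtual conversion reduces to adding or subtracting the base of the kernel half. A minimal sketch (these exact definitions are assumed for illustration, not quoted from the HelenOS headers):

    /* Sketch, assuming the kernel half mirrors physical memory starting
     * at 0xffff800000000000; conversion is then a fixed offset. */
    #define KERNEL_SPACE_BASE  UINT64_C(0xffff800000000000)
    #define PA2KA(pa)  ((uintptr_t) (pa) + KERNEL_SPACE_BASE)  /* physical -> kernel virtual */
    #define KA2PA(ka)  ((uintptr_t) (ka) - KERNEL_SPACE_BASE)  /* kernel virtual -> physical */

The PA2KA conversion appears in the page_pt.c hunks below.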
kernel/genarch/src/mm/page_pt.c
 #include <align.h>
 #include <macros.h>
-#include <bitops.h>
 
 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
…
 }
 
-/** Return the size of the region mapped by a single PTL0 entry.
- *
- * @return Size of the region mapped by a single PTL0 entry.
- */
-static uintptr_t ptl0_step_get(void)
-{
-    size_t va_bits;
-
-    va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
-        fnzb(PTL3_ENTRIES) + PAGE_WIDTH;
-
-    return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
-}
-
 /** Make the mappings in the given range global accross all address spaces.
…
 {
     uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
-    uintptr_t ptl0_step = ptl0_step_get();
+    uintptr_t ptl0step = (((uintptr_t) -1) / PTL0_ENTRIES) + 1;
     size_t order;
     uintptr_t addr;
…
 #endif
 
+    ASSERT(ispwr2(ptl0step));
     ASSERT(size > 0);
 
-    for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
-        addr += ptl0_step) {
+    for (addr = ALIGN_DOWN(base, ptl0step); addr - 1 < base + size - 1;
+        addr += ptl0step) {
         uintptr_t l1;
 
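The restored ptl0step expression computes 2^64 / PTL0_ENTRIES without ever forming 2^64, which would overflow a 64-bit uintptr_t: for a power-of-two divisor N, (2^64 - 1) / N equals 2^64 / N - 1, so adding one gives the exact step. A standalone sketch of the arithmetic (the 512-entry table size is an assumption for illustration, not taken from this changeset):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed PTL0_ENTRIES value for a 512-entry top-level table. */
        const uint64_t entries = 512;

        /* (2^64 - 1) / N + 1 == 2^64 / N exactly when N is a power of two. */
        uint64_t step = (UINT64_MAX / entries) + 1;
        assert(step == (uint64_t) 1 << 55);  /* 2^64 / 2^9 = 2^55 */
        return 0;
    }

The ASSERT(ispwr2(ptl0step)) added in the same hunk guards exactly this power-of-two assumption.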
kernel/generic/include/lib/ra.h
 
 typedef struct {
-    IRQ_SPINLOCK_DECLARE(lock);
+    SPINLOCK_DECLARE(lock);
     list_t spans;  /**< List of arena's spans. */
 } ra_arena_t;
kernel/generic/include/mm/slab.h
     slab_magazine_t *current;
     slab_magazine_t *last;
-    IRQ_SPINLOCK_DECLARE(lock);
+    SPINLOCK_DECLARE(lock);
 } slab_mag_cache_t;
 
…
     list_t full_slabs;     /**< List of full slabs */
     list_t partial_slabs;  /**< List of partial slabs */
-    IRQ_SPINLOCK_DECLARE(slablock);
+    SPINLOCK_DECLARE(slablock);
     /* Magazines */
     list_t magazines;      /**< List o full magazines */
-    IRQ_SPINLOCK_DECLARE(maglock);
+    SPINLOCK_DECLARE(maglock);
 
     /** CPU cache */
kernel/generic/src/lib/ra.c
         return NULL;
 
-    irq_spinlock_initialize(&arena->lock, "arena_lock");
+    spinlock_initialize(&arena->lock, "arena_lock");
     list_initialize(&arena->spans);
 
…
 
     /* TODO: check for overlaps */
-    irq_spinlock_lock(&arena->lock, true);
+    spinlock_lock(&arena->lock);
     list_append(&span->span_link, &arena->spans);
-    irq_spinlock_unlock(&arena->lock, true);
+    spinlock_unlock(&arena->lock);
     return true;
 }
…
     ASSERT(ispwr2(alignment));
 
-    irq_spinlock_lock(&arena->lock, true);
+    spinlock_lock(&arena->lock);
     list_foreach(arena->spans, cur) {
         ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
…
             break;
     }
-    irq_spinlock_unlock(&arena->lock, true);
+    spinlock_unlock(&arena->lock);
 
     return base;
…
 void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
 {
-    irq_spinlock_lock(&arena->lock, true);
+    spinlock_lock(&arena->lock);
     list_foreach(arena->spans, cur) {
         ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
…
         if (iswithin(span->base, span->size, base, size)) {
             ra_span_free(span, base, size);
-            irq_spinlock_unlock(&arena->lock, true);
+            spinlock_unlock(&arena->lock);
             return;
         }
     }
-    irq_spinlock_unlock(&arena->lock, true);
+    spinlock_unlock(&arena->lock);
 
     panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
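This file shows the lock-API substitution that repeats through slab.h and slab.c below: the IRQ-aware wrappers take a second argument requesting that interrupts be disabled while the lock is held, while the plain spinlock calls leave the interrupt state to the caller. A side-by-side sketch of the two call shapes, for comparison only:

    /* IRQ-aware wrapper: 'true' disables interrupts before acquiring
     * the lock and restores the previous state on the matching unlock. */
    irq_spinlock_lock(&arena->lock, true);
    /* ... critical section, safe against code running in IRQ context ... */
    irq_spinlock_unlock(&arena->lock, true);

    /* Plain spinlock: no change to the interrupt state. */
    spinlock_lock(&arena->lock);
    /* ... critical section ... */
    spinlock_unlock(&arena->lock);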
kernel/generic/src/mm/frame.c
 #endif
 
-    /*
-     * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
-     * to prevent deadlock with TLB shootdown.
-     */
-    ipl_t ipl = interrupts_disable();
     mutex_lock(&mem_avail_mtx);
 
…
 
     mutex_unlock(&mem_avail_mtx);
-    interrupts_restore(ipl);
 
 #ifdef CONFIG_DEBUG
…
      * Signal that some memory has been freed.
      */
-
-
-    /*
-     * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
-     * to prevent deadlock with TLB shootdown.
-     */
-    ipl_t ipl = interrupts_disable();
     mutex_lock(&mem_avail_mtx);
     if (mem_avail_req > 0)
…
     }
     mutex_unlock(&mem_avail_mtx);
-    interrupts_restore(ipl);
 
     if (!(flags & FRAME_NO_RESERVE))
kernel/generic/src/mm/slab.c
         freed = cache->destructor(obj);
 
-    irq_spinlock_lock(&cache->slablock, true);
+    spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
 
…
         /* Free associated memory */
         list_remove(&slab->link);
-        irq_spinlock_unlock(&cache->slablock, true);
+        spinlock_unlock(&cache->slablock);
 
         return freed + slab_space_free(cache, slab);
…
     }
 
-    irq_spinlock_unlock(&cache->slablock, true);
+    spinlock_unlock(&cache->slablock);
     return freed;
 }
…
 NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
 {
-    irq_spinlock_lock(&cache->slablock, true);
+    spinlock_lock(&cache->slablock);
 
     slab_t *slab;
…
          *
          */
-        irq_spinlock_unlock(&cache->slablock, true);
+        spinlock_unlock(&cache->slablock);
         slab = slab_space_alloc(cache, flags);
         if (!slab)
             return NULL;
 
-        irq_spinlock_lock(&cache->slablock, true);
+        spinlock_lock(&cache->slablock);
     } else {
         slab = list_get_instance(list_first(&cache->partial_slabs),
…
         list_prepend(&slab->link, &cache->partial_slabs);
 
-    irq_spinlock_unlock(&cache->slablock, true);
+    spinlock_unlock(&cache->slablock);
 
     if ((cache->constructor) && (cache->constructor(obj, flags))) {
…
     link_t *cur;
 
-    irq_spinlock_lock(&cache->maglock, true);
+    spinlock_lock(&cache->maglock);
     if (!list_empty(&cache->magazines)) {
         if (first)
…
         atomic_dec(&cache->magazine_counter);
     }
-    irq_spinlock_unlock(&cache->maglock, true);
 
+    spinlock_unlock(&cache->maglock);
     return mag;
 }
…
     slab_magazine_t *mag)
 {
-    irq_spinlock_lock(&cache->maglock, true);
+    spinlock_lock(&cache->maglock);
 
     list_prepend(&mag->link, &cache->magazines);
     atomic_inc(&cache->magazine_counter);
 
-    irq_spinlock_unlock(&cache->maglock, true);
+    spinlock_unlock(&cache->maglock);
 
 
…
     slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
 
-    ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
+    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
 
     if (cmag) { /* First try local CPU magazines */
…
         return NULL;
 
-    irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
     slab_magazine_t *mag = get_full_current_mag(cache);
     if (!mag) {
-        irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
         return NULL;
     }
 
     void *obj = mag->objs[--mag->busy];
-    irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
 
     atomic_dec(&cache->cached_objs);
…
     slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
 
-    ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
+    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
 
     if (cmag) {
…
         return -1;
 
-    irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
     slab_magazine_t *mag = make_empty_current_mag(cache);
     if (!mag) {
-        irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
         return -1;
     }
…
     mag->objs[mag->busy++] = obj;
 
-    irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
 
     atomic_inc(&cache->cached_objs);
…
     for (i = 0; i < config.cpu_count; i++) {
         memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
-        irq_spinlock_initialize(&cache->mag_cache[i].lock,
+        spinlock_initialize(&cache->mag_cache[i].lock,
             "slab.cache.mag_cache[].lock");
     }
…
     list_initialize(&cache->magazines);
 
-    irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock");
-    irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock");
+    spinlock_initialize(&cache->slablock, "slab.cache.slablock");
+    spinlock_initialize(&cache->maglock, "slab.cache.maglock");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
…
         size_t i;
         for (i = 0; i < config.cpu_count; i++) {
-            irq_spinlock_lock(&cache->mag_cache[i].lock, true);
+            spinlock_lock(&cache->mag_cache[i].lock);
 
             mag = cache->mag_cache[i].current;
…
             cache->mag_cache[i].last = NULL;
 
-            irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
+            spinlock_unlock(&cache->mag_cache[i].lock);
         }
     }
kernel/generic/src/synch/mutex.c
 #include <debug.h>
 #include <arch.h>
-#include <stacktrace.h>
 
 /** Initialize mutex.
…
     return semaphore_count_get(&mtx->sem) <= 0;
 }
-
-#define MUTEX_DEADLOCK_THRESHOLD    100000000
 
 /** Acquire mutex.
…
         ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
 
-        unsigned int cnt = 0;
-        bool deadlock_reported = false;
         do {
-            if (cnt++ > MUTEX_DEADLOCK_THRESHOLD) {
-                printf("cpu%u: looping on active mutex %p\n",
-                    CPU->id, mtx);
-                stack_trace();
-                cnt = 0;
-                deadlock_reported = true;
-            }
             rc = semaphore_trydown(&mtx->sem);
         } while (SYNCH_FAILED(rc) &&
             !(flags & SYNCH_FLAGS_NON_BLOCKING));
-        if (deadlock_reported)
-            printf("cpu%u: not deadlocked\n", CPU->id);
     }
 
kernel/generic/src/synch/spinlock.c
 #include <debug.h>
 #include <symtab.h>
-#include <stacktrace.h>
 
 #ifdef CONFIG_SMP
…
                 "caller=%p (%s)\n", CPU->id, lock, lock->name,
                 (void *) CALLER, symtab_fmt_name_lookup(CALLER));
-            stack_trace();
 
             i = 0;
…
     int rc = spinlock_trylock(&(lock->lock));
 
-    ASSERT_IRQ_SPINLOCK(!rc || !lock->guard, lock);
+    ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
     return rc;
 }
uspace/Makefile
     app/mkexfat \
     app/mkmfs \
-    app/nterm \
     app/redir \
     app/sbi \
uspace/lib/usb/include/usb/debug.h
 
 /** Default log level. */
-#ifdef CONFIG_USB_VERBOSE
+#ifdef CONFIG_USB_RELEASE_BUILD
+#  define USB_LOG_LEVEL_DEFAULT USB_LOG_LEVEL_INFO
+#else
 #  define USB_LOG_LEVEL_DEFAULT USB_LOG_LEVEL_DEBUG
-#else
-#  define USB_LOG_LEVEL_DEFAULT USB_LOG_LEVEL_INFO
 #endif
 
uspace/srv/net/tcp/conn.c
 void tcp_conn_addref(tcp_conn_t *conn)
 {
-    log_msg(LVL_DEBUG2, "%s: tcp_conn_addref(%p)", conn->name, conn);
+    log_msg(LVL_DEBUG, "%s: tcp_conn_addref(%p)", conn->name, conn);
     atomic_inc(&conn->refcnt);
 }
…
 void tcp_conn_delref(tcp_conn_t *conn)
 {
-    log_msg(LVL_DEBUG2, "%s: tcp_conn_delref(%p)", conn->name, conn);
+    log_msg(LVL_DEBUG, "%s: tcp_conn_delref(%p)", conn->name, conn);
 
     if (atomic_predec(&conn->refcnt) == 0)
…
 static bool tcp_socket_match(tcp_sock_t *sock, tcp_sock_t *patt)
 {
-    log_msg(LVL_DEBUG2, "tcp_socket_match(sock=(%x,%u), pat=(%x,%u))",
+    log_msg(LVL_DEBUG, "tcp_socket_match(sock=(%x,%u), pat=(%x,%u))",
         sock->addr.ipv4, sock->port, patt->addr.ipv4, patt->port);
 
…
         return false;
 
-    log_msg(LVL_DEBUG2, " -> match");
+    log_msg(LVL_DEBUG, " -> match");
 
     return true;
…
 static bool tcp_sockpair_match(tcp_sockpair_t *sp, tcp_sockpair_t *pattern)
 {
-    log_msg(LVL_DEBUG2, "tcp_sockpair_match(%p, %p)", sp, pattern);
+    log_msg(LVL_DEBUG, "tcp_sockpair_match(%p, %p)", sp, pattern);
 
     if (!tcp_socket_match(&sp->local, &pattern->local))
…
         tcp_conn_t *conn = list_get_instance(link, tcp_conn_t, link);
         tcp_sockpair_t *csp = &conn->ident;
-        log_msg(LVL_DEBUG2, "compare with conn (f:(%x,%u), l:(%x,%u))",
+        log_msg(LVL_DEBUG, "compare with conn (f:(%x,%u), l:(%x,%u))",
             csp->foreign.addr.ipv4, csp->foreign.port,
             csp->local.addr.ipv4, csp->local.port);
uspace/srv/net/tcp/ncsim.c
 #include <io/log.h>
 #include <stdlib.h>
-#include <fibril.h>
+#include <thread.h>
 #include "conn.h"
 #include "ncsim.h"
…
 }
 
-/** Network condition simulator handler fibril. */
-static int tcp_ncsim_fibril(void *arg)
+/** Network condition simulator handler thread. */
+static void tcp_ncsim_thread(void *arg)
 {
     link_t *link;
…
     int rc;
 
-    log_msg(LVL_DEBUG, "tcp_ncsim_fibril()");
+    log_msg(LVL_DEBUG, "tcp_ncsim_thread()");
 
 
…
         free(sqe);
     }
-
-    /* Not reached */
-    return 0;
 }
 
-/** Start simulator handler fibril. */
-void tcp_ncsim_fibril_start(void)
+/** Start simulator handler thread. */
+void tcp_ncsim_thread_start(void)
 {
-    fid_t fid;
+    thread_id_t tid;
+    int rc;
 
-    log_msg(LVL_DEBUG, "tcp_ncsim_fibril_start()");
+    log_msg(LVL_DEBUG, "tcp_ncsim_thread_start()");
 
-    fid = fibril_create(tcp_ncsim_fibril, NULL);
-    if (fid == 0) {
-        log_msg(LVL_ERROR, "Failed creating ncsim fibril.");
+    rc = thread_create(tcp_ncsim_thread, NULL, "ncsim", &tid);
+    if (rc != EOK) {
+        log_msg(LVL_ERROR, "Failed creating ncsim thread.");
         return;
     }
-
-    fibril_add_ready(fid);
 
uspace/srv/net/tcp/ncsim.h
 extern void tcp_ncsim_init(void);
 extern void tcp_ncsim_bounce_seg(tcp_sockpair_t *, tcp_segment_t *);
-extern void tcp_ncsim_fibril_start(void);
+extern void tcp_ncsim_thread_start(void);
 
+
 #endif
uspace/srv/net/tcp/rqueue.c
 #include <io/log.h>
 #include <stdlib.h>
-#include <fibril.h>
+#include <thread.h>
 #include "conn.h"
 #include "pdu.h"
…
 }
 
-/** Receive queue handler fibril. */
-static int tcp_rqueue_fibril(void *arg)
+/** Receive queue handler thread. */
+static void tcp_rqueue_thread(void *arg)
 {
     link_t *link;
     tcp_rqueue_entry_t *rqe;
 
-    log_msg(LVL_DEBUG, "tcp_rqueue_fibril()");
+    log_msg(LVL_DEBUG, "tcp_rqueue_thread()");
 
     while (true) {
…
         tcp_as_segment_arrived(&rqe->sp, rqe->seg);
     }
-
-    /* Not reached */
-    return 0;
 }
 
-/** Start receive queue handler fibril. */
-void tcp_rqueue_fibril_start(void)
+/** Start receive queue handler thread. */
+void tcp_rqueue_thread_start(void)
 {
-    fid_t fid;
+    thread_id_t tid;
+    int rc;
 
-    log_msg(LVL_DEBUG, "tcp_rqueue_fibril_start()");
+    log_msg(LVL_DEBUG, "tcp_rqueue_thread_start()");
 
-    fid = fibril_create(tcp_rqueue_fibril, NULL);
-    if (fid == 0) {
-        log_msg(LVL_ERROR, "Failed creating rqueue fibril.");
+    rc = thread_create(tcp_rqueue_thread, NULL, "rqueue", &tid);
+    if (rc != EOK) {
+        log_msg(LVL_ERROR, "Failed creating rqueue thread.");
         return;
     }
-
-    fibril_add_ready(fid);
 
uspace/srv/net/tcp/rqueue.h
 extern void tcp_rqueue_insert_seg(tcp_sockpair_t *, tcp_segment_t *);
 extern void tcp_rqueue_handler(void *);
-extern void tcp_rqueue_fibril_start(void);
+extern void tcp_rqueue_thread_start(void);
 
 
uspace/srv/net/tcp/segment.c
 void tcp_segment_dump(tcp_segment_t *seg)
 {
-    log_msg(LVL_DEBUG2, "Segment dump:");
-    log_msg(LVL_DEBUG2, " - ctrl = %u", (unsigned)seg->ctrl);
-    log_msg(LVL_DEBUG2, " - seq = %" PRIu32, seg->seq);
-    log_msg(LVL_DEBUG2, " - ack = %" PRIu32, seg->ack);
-    log_msg(LVL_DEBUG2, " - len = %" PRIu32, seg->len);
-    log_msg(LVL_DEBUG2, " - wnd = %" PRIu32, seg->wnd);
-    log_msg(LVL_DEBUG2, " - up = %" PRIu32, seg->up);
+    log_msg(LVL_DEBUG, "Segment dump:");
+    log_msg(LVL_DEBUG, " - ctrl = %u", (unsigned)seg->ctrl);
+    log_msg(LVL_DEBUG, " - seq = %" PRIu32, seg->seq);
+    log_msg(LVL_DEBUG, " - ack = %" PRIu32, seg->ack);
+    log_msg(LVL_DEBUG, " - len = %" PRIu32, seg->len);
+    log_msg(LVL_DEBUG, " - wnd = %" PRIu32, seg->wnd);
+    log_msg(LVL_DEBUG, " - up = %" PRIu32, seg->up);
 }
 
uspace/srv/net/tcp/sock.c
 #include "ucall.h"
 
+#define FRAGMENT_SIZE 1024
+
 #define MAX_BACKLOG 128
 
…
 static void tcp_sock_connection(ipc_callid_t iid, ipc_call_t *icall, void *arg);
 static void tcp_sock_cstate_cb(tcp_conn_t *conn, void *arg);
-static int tcp_sock_recv_fibril(void *arg);
 
 int tcp_sock_init(void)
…
     async_exch_t *exch = async_exchange_begin(sock_core->sess);
     async_msg_5(exch, NET_SOCKET_RECEIVED, (sysarg_t)sock_core->socket_id,
-        TCP_SOCK_FRAGMENT_SIZE, 0, 0, 1);
+        FRAGMENT_SIZE, 0, 0, 1);
     async_exchange_end(exch);
 }
…
     async_exch_t *exch = async_exchange_begin(lsock_core->sess);
     async_msg_5(exch, NET_SOCKET_ACCEPTED, (sysarg_t)lsock_core->socket_id,
-        TCP_SOCK_FRAGMENT_SIZE, 0, 0, 0);
+        FRAGMENT_SIZE, 0, 0, 0);
     async_exchange_end(exch);
 }
 
-static int tcp_sock_create(tcp_client_t *client, tcp_sockdata_t **rsock)
+static void tcp_sock_socket(tcp_client_t *client, ipc_callid_t callid, ipc_call_t call)
 {
     tcp_sockdata_t *sock;
-
-    log_msg(LVL_DEBUG, "tcp_sock_create()");
-    *rsock = NULL;
-
-    sock = calloc(sizeof(tcp_sockdata_t), 1);
-    if (sock == NULL)
-        return ENOMEM;
-
-    fibril_mutex_initialize(&sock->lock);
-    sock->client = client;
-
-    sock->recv_buffer_used = 0;
-    sock->recv_error = TCP_EOK;
-    fibril_mutex_initialize(&sock->recv_buffer_lock);
-    fibril_condvar_initialize(&sock->recv_buffer_cv);
-    list_initialize(&sock->ready);
-
-    *rsock = sock;
-    return EOK;
-}
-
-static void tcp_sock_uncreate(tcp_sockdata_t *sock)
-{
-    log_msg(LVL_DEBUG, "tcp_sock_uncreate()");
-    free(sock);
-}
-
-static int tcp_sock_finish_setup(tcp_sockdata_t *sock, int *sock_id)
-{
     socket_core_t *sock_core;
-    int rc;
-
-    log_msg(LVL_DEBUG, "tcp_sock_finish_setup()");
-
-    sock->recv_fibril = fibril_create(tcp_sock_recv_fibril, sock);
-    if (sock->recv_fibril == 0)
-        return ENOMEM;
-
-    rc = socket_create(&sock->client->sockets, sock->client->sess,
-        sock, sock_id);
-
-    if (rc != EOK)
-        return rc;
-
-    sock_core = socket_cores_find(&sock->client->sockets, *sock_id);
-    assert(sock_core != NULL);
-    sock->sock_core = sock_core;
-
-    return EOK;
-}
-
-static void tcp_sock_socket(tcp_client_t *client, ipc_callid_t callid, ipc_call_t call)
-{
-    tcp_sockdata_t *sock;
     int sock_id;
     int rc;
…
 
     log_msg(LVL_DEBUG, "tcp_sock_socket()");
-
-    rc = tcp_sock_create(client, &sock);
-    if (rc != EOK) {
-        async_answer_0(callid, rc);
-        return;
-    }
-
+    sock = calloc(sizeof(tcp_sockdata_t), 1);
+    if (sock == NULL) {
+        async_answer_0(callid, ENOMEM);
+        return;
+    }
+
+    fibril_mutex_initialize(&sock->lock);
+    sock->client = client;
     sock->laddr.ipv4 = TCP_IPV4_ANY;
     sock->lconn = NULL;
     sock->backlog = 0;
+    list_initialize(&sock->ready);
 
     sock_id = SOCKET_GET_SOCKET_ID(call);
-    rc = tcp_sock_finish_setup(sock, &sock_id);
+    rc = socket_create(&client->sockets, client->sess, sock, &sock_id);
     if (rc != EOK) {
-        tcp_sock_uncreate(sock);
         async_answer_0(callid, rc);
         return;
     }
 
+    sock_core = socket_cores_find(&client->sockets, sock_id);
+    assert(sock_core != NULL);
+    sock->sock_core = sock_core;
+
     SOCKET_SET_SOCKET_ID(answer, sock_id);
 
-    SOCKET_SET_DATA_FRAGMENT_SIZE(answer, TCP_SOCK_FRAGMENT_SIZE);
+    SOCKET_SET_DATA_FRAGMENT_SIZE(answer, FRAGMENT_SIZE);
     SOCKET_SET_HEADER_SIZE(answer, sizeof(tcp_header_t));
…
     }
 
-    if (rc == EOK)
-        fibril_add_ready(socket->recv_fibril);
-
     async_answer_0(callid, rc);
+
+    /* Push one fragment notification to client's queue */
+    tcp_sock_notify_data(sock_core);
+    log_msg(LVL_DEBUG, "tcp_sock_connect(): notify conn\n");
 }
 
…
     int asock_id;
     socket_core_t *sock_core;
+    socket_core_t *asock_core;
     tcp_sockdata_t *socket;
     tcp_sockdata_t *asocket;
…
     /* Allocate socket for accepted connection */
 
-    rc = tcp_sock_create(client, &asocket);
-    if (rc != EOK) {
-        fibril_mutex_unlock(&socket->lock);
-        async_answer_0(callid, rc);
-        return;
-    }
-
+    log_msg(LVL_DEBUG, "tcp_sock_accept(): allocate asocket\n");
+    asocket = calloc(sizeof(tcp_sockdata_t), 1);
+    if (asocket == NULL) {
+        fibril_mutex_unlock(&socket->lock);
+        async_answer_0(callid, ENOMEM);
+        return;
+    }
+
+    fibril_mutex_initialize(&asocket->lock);
+    asocket->client = client;
     asocket->conn = conn;
     log_msg(LVL_DEBUG, "tcp_sock_accept():create asocket\n");
 
-    rc = tcp_sock_finish_setup(asocket, &asock_id);
+    rc = socket_create(&client->sockets, client->sess, asocket, &asock_id);
     if (rc != EOK) {
-        tcp_sock_uncreate(asocket);
         fibril_mutex_unlock(&socket->lock);
         async_answer_0(callid, rc);
         return;
     }
-
-    fibril_add_ready(asocket->recv_fibril);
-
     log_msg(LVL_DEBUG, "tcp_sock_accept(): find acore\n");
 
-    SOCKET_SET_DATA_FRAGMENT_SIZE(answer, TCP_SOCK_FRAGMENT_SIZE);
+    asock_core = socket_cores_find(&client->sockets, asock_id);
+    assert(asock_core != NULL);
+
+    SOCKET_SET_DATA_FRAGMENT_SIZE(answer, FRAGMENT_SIZE);
     SOCKET_SET_SOCKET_ID(answer, asock_id);
     SOCKET_SET_ADDRESS_LENGTH(answer, sizeof(struct sockaddr_in));
 
-    async_answer_3(callid, asocket->sock_core->socket_id,
+    async_answer_3(callid, asock_core->socket_id,
         IPC_GET_ARG1(answer), IPC_GET_ARG2(answer),
         IPC_GET_ARG3(answer));
…
     /* Push one fragment notification to client's queue */
     log_msg(LVL_DEBUG, "tcp_sock_accept(): notify data\n");
+    tcp_sock_notify_data(asock_core);
     fibril_mutex_unlock(&socket->lock);
 }
…
     ipc_callid_t wcallid;
     size_t length;
-    uint8_t buffer[TCP_SOCK_FRAGMENT_SIZE];
+    uint8_t buffer[FRAGMENT_SIZE];
     tcp_error_t trc;
     int rc;
…
     }
 
-        if (length > TCP_SOCK_FRAGMENT_SIZE)
-            length = TCP_SOCK_FRAGMENT_SIZE;
+        if (length > FRAGMENT_SIZE)
+            length = FRAGMENT_SIZE;
 
         rc = async_data_write_finalize(wcallid, buffer, length);
…
 
     IPC_SET_ARG1(answer, 0);
-    SOCKET_SET_DATA_FRAGMENT_SIZE(answer, TCP_SOCK_FRAGMENT_SIZE);
+    SOCKET_SET_DATA_FRAGMENT_SIZE(answer, FRAGMENT_SIZE);
     async_answer_2(callid, EOK, IPC_GET_ARG1(answer),
         IPC_GET_ARG2(answer));
…
     ipc_call_t answer;
     ipc_callid_t rcallid;
+    uint8_t buffer[FRAGMENT_SIZE];
     size_t data_len;
+    xflags_t xflags;
+    tcp_error_t trc;
     struct sockaddr_in addr;
     tcp_sock_t *rsock;
…
     (void)flags;
 
-    log_msg(LVL_DEBUG, "tcp_sock_recvfrom(): lock recv_buffer_lock");
-    fibril_mutex_lock(&socket->recv_buffer_lock);
-    while (socket->recv_buffer_used == 0 && socket->recv_error == TCP_EOK) {
-        log_msg(LVL_DEBUG, "wait for recv_buffer_cv + recv_buffer_used != 0");
-        fibril_condvar_wait(&socket->recv_buffer_cv,
-            &socket->recv_buffer_lock);
-    }
-
-    log_msg(LVL_DEBUG, "Got data in sock recv_buffer");
-
-    data_len = socket->recv_buffer_used;
-    rc = socket->recv_error;
-
-    switch (socket->recv_error) {
+    trc = tcp_uc_receive(socket->conn, buffer, FRAGMENT_SIZE, &data_len,
+        &xflags);
+    log_msg(LVL_DEBUG, "**** tcp_uc_receive done");
+
+    switch (trc) {
     case TCP_EOK:
         rc = EOK;
…
     }
 
-    log_msg(LVL_DEBUG, "**** recv result -> %d", rc);
+    log_msg(LVL_DEBUG, "**** tcp_uc_receive -> %d", rc);
     if (rc != EOK) {
-        fibril_mutex_unlock(&socket->recv_buffer_lock);
         fibril_mutex_unlock(&socket->lock);
         async_answer_0(callid, rc);
…
     log_msg(LVL_DEBUG, "addr read receive");
     if (!async_data_read_receive(&rcallid, &addr_length)) {
-        fibril_mutex_unlock(&socket->recv_buffer_lock);
         fibril_mutex_unlock(&socket->lock);
         async_answer_0(callid, EINVAL);
…
     rc = async_data_read_finalize(rcallid, &addr, addr_length);
     if (rc != EOK) {
-        fibril_mutex_unlock(&socket->recv_buffer_lock);
         fibril_mutex_unlock(&socket->lock);
         async_answer_0(callid, EINVAL);
…
     log_msg(LVL_DEBUG, "data read receive");
     if (!async_data_read_receive(&rcallid, &length)) {
-        fibril_mutex_unlock(&socket->recv_buffer_lock);
         fibril_mutex_unlock(&socket->lock);
         async_answer_0(callid, EINVAL);
…
 
     log_msg(LVL_DEBUG, "data read finalize");
-    rc = async_data_read_finalize(rcallid, socket->recv_buffer, length);
-
-    socket->recv_buffer_used -= length;
-    log_msg(LVL_DEBUG, "tcp_sock_recvfrom: %zu left in buffer",
-        socket->recv_buffer_used);
-    if (socket->recv_buffer_used > 0) {
-        memmove(socket->recv_buffer, socket->recv_buffer + length,
-            socket->recv_buffer_used);
-        tcp_sock_notify_data(socket->sock_core);
-    }
-
-    fibril_condvar_broadcast(&socket->recv_buffer_cv);
+    rc = async_data_read_finalize(rcallid, buffer, length);
 
     if (length < data_len && rc == EOK)
…
     SOCKET_SET_READ_DATA_LENGTH(answer, length);
     async_answer_1(callid, EOK, IPC_GET_ARG1(answer));
-
-    fibril_mutex_unlock(&socket->recv_buffer_lock);
+
+    /* Push one fragment notification to client's queue */
+    tcp_sock_notify_data(sock_core);
     fibril_mutex_unlock(&socket->lock);
 }
…
     tcp_error_t trc;
     int rc;
+    uint8_t buffer[FRAGMENT_SIZE];
+    size_t data_len;
+    xflags_t xflags;
 
     log_msg(LVL_DEBUG, "tcp_sock_close()");
…
         return;
     }
+
+    /* Drain incoming data. This should really be done in the background.
+     */
+    do {
+        trc = tcp_uc_receive(socket->conn, buffer,
+            FRAGMENT_SIZE, &data_len, &xflags);
+    } while (trc == TCP_EOK);
+
+    tcp_uc_delete(socket->conn);
 }
…
     tcp_sock_notify_aconn(socket->sock_core);
     fibril_mutex_unlock(&socket->lock);
 }
-
-static int tcp_sock_recv_fibril(void *arg)
-{
-    tcp_sockdata_t *sock = (tcp_sockdata_t *)arg;
-    size_t data_len;
-    xflags_t xflags;
-    tcp_error_t trc;
-
-    log_msg(LVL_DEBUG, "tcp_sock_recv_fibril()");
-
-    while (true) {
-        log_msg(LVL_DEBUG, "call tcp_uc_receive()");
-        fibril_mutex_lock(&sock->recv_buffer_lock);
-        while (sock->recv_buffer_used != 0)
-            fibril_condvar_wait(&sock->recv_buffer_cv,
-                &sock->recv_buffer_lock);
-
-        trc = tcp_uc_receive(sock->conn, sock->recv_buffer,
-            TCP_SOCK_FRAGMENT_SIZE, &data_len, &xflags);
-
-        if (trc != TCP_EOK) {
-            sock->recv_error = trc;
-            fibril_condvar_broadcast(&sock->recv_buffer_cv);
-            fibril_mutex_unlock(&sock->recv_buffer_lock);
-            tcp_sock_notify_data(sock->sock_core);
-            break;
-        }
-
-        log_msg(LVL_DEBUG, "got data - broadcast recv_buffer_cv");
-
-        sock->recv_buffer_used = data_len;
-        fibril_condvar_broadcast(&sock->recv_buffer_cv);
-        fibril_mutex_unlock(&sock->recv_buffer_lock);
-        tcp_sock_notify_data(sock->sock_core);
-    }
-
-    tcp_uc_delete(sock->conn);
-
-    return 0;
-}
 
uspace/srv/net/tcp/tcp.c
 
     tcp_rqueue_init();
-    tcp_rqueue_fibril_start();
+    tcp_rqueue_thread_start();
 
     tcp_ncsim_init();
-    tcp_ncsim_fibril_start();
+    tcp_ncsim_thread_start();
 
     if (0) tcp_test();
uspace/srv/net/tcp/tcp_type.h
 #include <async.h>
 #include <bool.h>
-#include <fibril.h>
 #include <fibril_synch.h>
 #include <socket_core.h>
…
 } tcp_client_t;
 
-#define TCP_SOCK_FRAGMENT_SIZE 1024
-
 typedef struct tcp_sockdata {
…
     /** List of connections (from lconn) that are ready to be accepted */
     list_t ready;
-    /** Receiving fibril */
-    fid_t recv_fibril;
-    uint8_t recv_buffer[TCP_SOCK_FRAGMENT_SIZE];
-    size_t recv_buffer_used;
-    fibril_mutex_t recv_buffer_lock;
-    fibril_condvar_t recv_buffer_cv;
-    tcp_error_t recv_error;
 } tcp_sockdata_t;
uspace/srv/net/tcp/test.c
 #include <errno.h>
 #include <stdio.h>
-#include <fibril.h>
+#include <thread.h>
 #include <str.h>
 #include "tcp_type.h"
…
 #define RCV_BUF_SIZE 64
 
-static int test_srv(void *arg)
+static void test_srv(void *arg)
 {
     tcp_conn_t *conn;
…
 
     printf("test_srv() terminating\n");
-    return 0;
 }
 
-static int test_cli(void *arg)
+static void test_cli(void *arg)
 {
     tcp_conn_t *conn;
…
     printf("C: User close...\n");
     tcp_uc_close(conn);
-
-    return 0;
 }
 
 void tcp_test(void)
 {
-    fid_t srv_fid;
-    fid_t cli_fid;
+    thread_id_t srv_tid;
+    thread_id_t cli_tid;
+    int rc;
 
     printf("tcp_test()\n");
…
 
     if (0) {
-        srv_fid = fibril_create(test_srv, NULL);
-        if (srv_fid == 0) {
-            printf("Failed to create server fibril.\n");
+        rc = thread_create(test_srv, NULL, "test_srv", &srv_tid);
+        if (rc != EOK) {
+            printf("Failed to create server thread.\n");
             return;
         }
-
-        fibril_add_ready(srv_fid);
     }
 
     if (0) {
-        cli_fid = fibril_create(test_cli, NULL);
-        if (cli_fid == 0) {
-            printf("Failed to create client fibril.\n");
+        rc = thread_create(test_cli, NULL, "test_cli", &cli_tid);
+        if (rc != EOK) {
+            printf("Failed to create client thread.\n");
             return;
         }
-
-        fibril_add_ready(cli_fid);
     }
 }