Conal's Journal - a non-linear personal web notebook
Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
/*{{{*/
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.tabSelected{color:[[ColorPalette::PrimaryDark]];
background:[[ColorPalette::TertiaryPale]];
border-left:1px solid [[ColorPalette::TertiaryLight]];
border-top:1px solid [[ColorPalette::TertiaryLight]];
border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
.sparktick {background:[[ColorPalette::PrimaryDark]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}

#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:'alpha(opacity:60)';}
/*}}}*/
/*{{{*/
* html .tiddler {height:1%;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions a {margin:0em 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 .3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizard .button {margin:0.5em 0em 0em 0em; font-size:1.2em;}
.wizardFooter .status {padding:0em 0.4em 0em 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em 0.1em 0.2em;}

#messageArea {position:fixed; top:2em; right:0em; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em 0.2em 0.2em 0.2em;}
#messageArea a {text-decoration:underline;}

.popupTiddler {position: absolute; z-index:300; padding:1em 1em 1em 1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tab {margin:0em 0em 0em 0.25em; padding:2px;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

#displayArea {margin:1em 17em 0em 14em;}

.toolbar {text-align:right; font-size:.9em;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer .button {margin:0em 0.25em; padding:0em 0.25em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}

.fieldsetFix {border:0; padding:0; margin:1px 0px 1px 0px;}

.sparkline {line-height:1em;}
.sparktick {outline:0;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}

* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0em; right:0em;}
#backstageButton a {padding:0.1em 0.4em 0.1em 0.4em; margin:0.1em 0.1em 0.1em 0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; margin:0em 3em 0em 3em; padding:1em 1em 1em 1em;}
.backstagePanelFooter a {padding:0.2em 0.4em 0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which use a logographic writing system and need larger font sizes.
***/

/*{{{*/
body {font-size:0.8em;}

#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}

.subtitle {font-size:0.8em;}

.viewer table.listView {font-size:0.95em;}

.htmlarea .toolbarHA table {border:1px solid ButtonFace; margin:0em 0em;}
/*}}}*/
/*{{{*/
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none ! important;}
#displayArea {margin: 1em 1em 0em 1em;}
/* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
noscript {display:none;}
}
/*}}}*/
<!--{{{-->
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
<!--{{{-->
<div class='toolbar' macro='toolbar closeTiddler closeOthers +editTiddler > fields syncing permalink references jump'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
<!--}}}-->
<!--{{{-->
<div class='toolbar' macro='toolbar +saveTiddler -cancelTiddler deleteTiddler'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser'></span></div>
<!--}}}-->
To get started with this blank TiddlyWiki, you'll need to modify the following tiddlers:
* SiteTitle & SiteSubtitle: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* DefaultTiddlers: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
These InterfaceOptions for customising TiddlyWiki are saved in your browser

<<option chkSaveBackups>> SaveBackups
<<option chkAutoSave>> AutoSave
<<option chkRegExpSearch>> RegExpSearch
<<option chkCaseSensitiveSearch>> CaseSensitiveSearch
<<option chkAnimate>> EnableAnimations

----

* Ordered a Kinesis Maxim ergonomic keyboard
* Messed around more with my [[Emergence of NVC|http://emergence.awakeningcompassion.com]] tiddlywiki.
* How about a [[Pajama|http://conal.net/Pajama]] applet in a tiddler?
<html><p align=center>
<applet code="swirl_wedge_annulus_ybrings.class" archive="../pajama.jar,swirl_wedge_annulus_ybrings.jar" width="210" height="226" codebase="http://conal.net/Pajama/examples">
<param name=swirliness value=0.5>
<param name=wedges value=10>
<param name=coverage value=0.7>
</applet>
</p></html>
* Got my authentication sign-up script to be editable.  I renamed signup and login_form, made new templates, and copied in their content.
* Edited the instructions to suggest "First Last" and why.  I'm greatly relieved to have eliminated the inconsistency in instructions.
* Printed Awakening Compassion materials.  Brought them to Erika, along with the cards and flyers she'll put out for her (first) NVC intro tonight.
* Called Patty Z.  Can she print if I email her a PDF?  No.  Computer problems.
* [[visit with Patty Z on 2006-10-24]]
* Printed the [[Emergence of NVC|http://emergence.awakeningcompassion.com]] notebook, after doing a lot of tweaking for font sizes.
* Changed how I use "to do".  Made it a separate [[misc to do]] tiddler. Move elements into journal entries when done.
* //Idea:// [[tiddlywiki mixins]].
* //Idea:// [[parametric tiddlers]].
* Entered more notes from yesterday, including my +++^[visit with Patty]<<tiddler "visit with Patty Z on 2006-10-24">>===.
* Sent Sandy Fox a note about [[yesterday|2006-10-24]]'s visit with Patty.  Sandy had encouraged me and offered some energy.
* Reply to [[Ralf Hutchison]] about coming to work with me and live with us for a while.
* Started using NestedSlidersPlugin, so I can have chunks hide & reveal.  Very neat!
* Clean up the "All" tiddler list in [[Emergence of NVC|http://emergence.awakeningcompassion.com]], so that it shows content only, and not mechanism.
* Moved MainMenu to horizontal and reclaim the left space for tiddler content
* Started using dates as tags of idea & conversation tiddlers.   Next, move "tagging" in ViewTemplate.
* Here's an idea about proxying for Haloscan-based comments: use {{{TinyHTTPProxy.py}}} from SUZUKI Hisao, and tweak my Apache rewrite rules to route the request outside of Zope.  First, try the proxy on its own.  Didn't work for me.  Maybe I don't understand how to use it.
* Tweaked the instructions in HorizontalMainMenuStyles to be self-contained.
* Added a picture to [[my user tiddler|http://emergence.awakeningcompassion.com/#%5B%5BConal%20Elliott%5D%5D]] in the [[Emergence of NVC|http://emergence.awakeningcompassion.com]] TW.
* Integrated Sandy's rewritten form of the psncc training "prerequisites".  It took several passes of rewriting and exchanges with Sandy.
* Evening [[phone call with Rita Herzog and Sandy Fox]].
* Pan returned my call.  We chatted some about the Emergence proposal.
** He asked how do we deal with someone giving a great workshop but not really on NVC.  Presumably they'd get positive reviews.  How would the online matching system let people know about the NVC mismatch?  I think the answer is in the richness of the feedback, beyond a single number.
** He has a different perception about the use of the word "trainer" in psncc.  He hears people say "certified" when they mean it, and otherwise they mean people who train.  I guess I could check more.  I do believe Patty, Mel, Holly E, and even Moreah mean certified when they don't say so.  And Sandy doesn't, and apparently not Pan.
** He suggested that I especially listen to Barb tonight to get to hear the beauty she experiences in the assessment process
** I guess I'm still somewhat steamed after last night's phone call with Rita H.
* Found [[CommentPlugin]].  It just adds a comment tiddler and sets up tags.  Nice idea and very elegant, but requires a login.
* Wrote up notes on psncc [[emergence meeting of 2006-10-27]]

* Snagged plugins from TiddlyTools:
** ResizeEditorPlugin: works great, but I think it ate my control-enter
** AutoTaggerPlugin: wow!
** ResizeEditTextPlugin: doesn't work yet.  Version problem, since TW isn't 2.1 yet.
** RearrangeTiddlersPlugin: I love it!
** CollapseTiddlersPlusPlugin (and corresponding CollapsedTemplate)
* Some afterthoughts from the [[emergence meeting of 2006-10-27]]:
** "Compassionate Consciousness"
** "Learning, Living, Sharing, and //Evolving// NVC"
** I want to write an article on [[the evolution of Nonviolent Communication]].
* Went to Sandy's to watch "The Secret" with her & others.
* I'm experimenting with placement of {{{tagging}}} in the TW.  For now, I tweaked StyleSheet to put tags & tagging into a table.  Looks okay but clumsily expressed.
* Started [[notes on CSS]].
* Fixed a bug in {{{ZiddlyWiki/actions/import}}}.  Wasn't converting things like "{{{&amp;}}}" back to "{{{&}}}".
* LTU article ["Future of Software Design?"|http://lambda-the-ultimate.org/node/1805].  Peter van Roy is exploring "self-managing systems", which are based on cybernetics.
* Why not host my darcs repos on haskell.org?
* I'm getting an error message every time I open the Emergence ziddlywiki.  I don't know why. <<smiley :(>>
* Got the "new journal" button to add "journal" as a tag.  I looked at the {{{newJournal}}} source and saw that parameters after the first one get used as tags.
* Stopped resisting self-tagging of journal tiddlers.
* Idea: write some articles about NVC and solicit input and dialog from psncc folks, cnvc certification candidates, cnvc certified trainers, etc.  In addition to doing some writing, stir up some discussion and awareness.  Oh: make a wiki for the evolving articles.  Invite people to add comments directly.  Or to send email or have recorded phone conversations, which will get added in to the wiki.
** That principle of Marshall's in which he shifts labeling language to relationship language, such as reflecting "That's a terrible picture" into "Oh, you don't like the picture?"  I've heard him call that principle "Never let anyone tell you who you are."  Offer and invite examples and translations.  Throw into the mix: "trainer".  Guess: "I trust that person to present NVC in a way I agree with."
** "The Evolution of NVC".  Invite comments on past, present, and future.  Trace how NVC has evolved so far, which ideas came from whom (e.g. ~Max-Neef on needs).  Speculate on possible future trends.  In this light, consider the idea of "preserving the integrity of the process".
* Looking for wysiwyg editors for TiddlyWiki.
** [[HTMLArea|http://www.dynarch.com/projects/htmlarea]] wysiwyg editor.  No longer maintained.  Points to [[InterAKT|http://www.interaktonline.com]]'s KTML 4.
** InterAKT was acquired by Adobe in September.  Apparently there is no longer a free KTML Lite.  <<smiley :(>>
** Found [[AsciiMathML: Translating ASCII math notation to Presentation MathML|http://www1.chapman.edu/~jipsen/asciimath.html]]. Pretty neat!  Combined with ~HtmlArea in the [[ASciencePad|http://math.chapman.edu/~jipsen/asciencepad/asciencepad.html]] TW.  I want TW markup, not HTML, however.
** [[Wikiwyg|http://www.wikiwyg.net]] looks like a reasonably good fit.
* Edited and sent note about psncc name.
* Called Kelly Darlington about getting together.  Breakfast 9am tomorrow at Alexa's in Bothell.
* Trying again with HaloscanMacro.
** Seems to work fine without RSS, which is okay with me.
** Found http://gravatar.com, which serves out 80x80 pixel "globally recognized avatars" and is used by haloscan.  Currently out of action due to growth and non-scalable implementation.  Up with 2.0 very soon.  See http://gravatar.com/blog.
** Of course, Haloscan serves ads in the comment window.  I could probably figure out a tricky way to filter them out, though it wouldn't be simple.  Better would be to find or create an Open Source replacement.
** Signed up for another Haloscan account: "emergenceAcCom".  I'll want one user name per TW site.  No -- instead, I tweaked the HaloscanMacro source to prepend an ~idPrefix specific to a TW.
** Changed haloscan date format from {{{m.d.y - g:i a}}} to {{{Y-m-d - g:i:s a}}} to {{{r}}} to {{{Y-m-d - g:i a}}}
** Contributed $15 to get a premium account (min$12), which (a) drops the ads in comments, and (b) lets me export comments.
* I also tried CommentPlugin.
** It works okay but requires either a log-in or that I make my TW open to anonymous edits.
** Perhaps I could do something on the ZW server end so that comments tiddlers are specially allowed without log-in.
** I see that comments are only editable by the comment's author.  Well, not exactly.  The "(edit)" button shows up if the same user is logged in--including "Anonymous User", i.e., no log-in.  This fact suggests a simple approach:
*** Encourage people to log in, so that their comments are attributed to them and cannot be changed by others who are not logged in (and not easily changed by anyone, since doing so requires finding the comment hidden via its ~excludeLists tag).
*** Recommend that non-comments be tagged as "protected".  Oh -- not quite, as "protected" means "Only editable by its author or an administrator", according to http://ziddlywiki.com/#SpecialTags.  Hmm.

* Doing some hunting around for a wysiwyg editor
* Puzzling over Zope organization.
** Experiment: add cookie_auth to Journal.  Then, when I click on "login", I get the title "Conal's Journal" instead of "Emergence of NVC".
** Conclusion: I want to make sub-directories for emergence, journal, etc.  People get user accounts for them separately.  I have an account at the root, which gives me access to all.
** Drawback: after logging in, the url is messed up.  Has an extra Folder level, e.g., journal.net.conal/Journal.
** Wait for Bob M to finish his 2.1 ZW, which will handle logging in via AJAX rather than by changing pages.
* Breakfast at Alexa's in Bothell with Kelly D.
* //Wow!// I hit upon a new way to organize my journal entries.  Tag each day's entry with <<tag day>> and with the month (e.g. <<tag 2006-10>>).  Similarly, tag each month with <<tag month>> and the month's name (e.g. <<tag October>>) and its year (e.g. <<tag 2006>>).  Tag each year with <<tag year>> (could also tag with <<tag decade>> or whatever).  Looks great in the [[site map]].  I'd like all this tagging to happen automatically.  For now I've tweaked the SideBarOptions "new journal" button to use the tag <<tag 2006-11>>.
* Emergence proposal:
** Folded in questions & answers from Friday night.
** Removed the last people labels (as far as I know), namely "sharers" and "seekers", in favor of action language (what people //do// not what they //are//).  In most cases, the result is more succinct and, to me, clearer.
** Got notes from Friday night's yummy meeting.  Edited into a [[more readable form and put on the first wiki|http://sharenvc.net/meeting_notes/2006-10-27]].
** Sent out a note with the tweaked proposal and the notes from Friday, in preparation for tomorrow's meeting.
Type the text for 'New Tiddler'
* Read [[The 18 Mistakes That Kill Startups|http://www.paulgraham.com/startupmistakes.html]] in Paul Graham's blog.  Worth remembering and revisiting.
** Single-founder myth.  "Vote of no confidence" from friends, no //esprit de corps// when things go wrong.
** Aside on java applets
>The scary thing about platforms is that there are always some that seem to outsiders to be fine, responsible choices and yet, like Windows in the 90s, will destroy you if you choose them. Java applets were probably the most spectacular example. This was supposed to be the new way of delivering applications. Presumably it killed just about 100% of the startups who believed that.
* I found out why Firefox prints so huge for me.  In {{{File/Page Setup}}}, the scale was set to 150%.  Changed to 100%.  Then to "shrink to fit page width".  Yay!  That dialog has margins and header/footer content as well.
* Lesson (again): meander through a program's options, taking note of what I find.  Don't wait until I have urgent demand for a feature.
* Learned that NestedSlidersPlugin doesn't play nicely with lists unless I add a blank space after the final "{{{===}}}".
* Got a response from Laurel about the Emergence proposal and meeting minutes.  She likes it a lot and had some substantive comments, including one about complementing systems theory as Ken Wilber recommends (interior, culture, external world).
* Long meeting among local folks who teach NVC, from 11:00am to 3:15pm at Elana's house.  After opening with foot washing/massage, we mostly talked about my [[Emergence proposal|http://emergence.awakeningcompassion.com]].  I'm still in shock about how much support & acceptance I'm getting for my proposal.  After some tweaks, I think it will be ready to bring to next week's core team meeting.  I have hand-written notes.  Strangely, I felt quite deflated at the end.  Some of the sweet energy of [[Friday night's meeting|http://sharenvc.net/meeting_notes/2006-10-27]] was missing.  Or maybe disappointment over running out of people to fight.  I'll type up my notes, and maybe then I'll get some more clarity.
* Our usual NVC practice group night.  A few couldn't come, so I canceled.  Moreah didn't get the message and came by bus.  I met her at Third Place and we had a delightful long talk over hot apple cider.
* Wrote to [[Shelly Farnham|http://www.farnhamresearch.com]].  She does social technology research & consulting and used to work at Microsoft Research.  Now she's involved with Mind Camp and dorkbot.
* Got a call from [[Michael Stillwater|http://www.innerharmony.com/michael.htm]].  Holly sang with him last night and gave him our business card.  He called to talk about creating a synthesis of his intuitive/empathic singing work with Compassionate Communication.  I think we're going to his ~ChantWave tonight on the peninsula.  He was very interested last night in Holly singing with him again.  She came home all lit up from her experience.  I suggested we might catch the ferry over together and chat.
* Wow!  A [[video|http://video.google.fr/videoplay?docid=-2237947353453839215&hl=fr]] of an RC plane with video camera attached, transmitting to //and// controlled by a head-mounted display.
* Transition lunch for Masanda, with the Seattle Unity staff.
* Typed up notes from yesterday's psncc meeting.
* Sent [[note to Sandy about giving one's word]] and NVC Academy.
* Typed up and added [[my notes|http://sharenvc.net/meeting_notes]] on the "Emergence" part of yesterday's meeting.
* Holly & I went to Michael Stillwater's ~ChantWave, which took place at Brian & Lisa's home in Indianola (on the peninsula, near Kingston).  Had a great time.  I loved talking with Michael.  He really gets & shares my vision of trusting in the innate wisdom of life, rather than using control top-down strategies.  Holly got lots of affirmation for her singing & loveliness.
* Talked with Heather H.  Delightful reconnecting chat.  She had an incredibly restorative and blissful time at the Canada retreat.  Maybe get to see her this weekend while she's in town for school.
* On the way to lunch, I talked with Sandy about the proposal and whether to bring to core team next week.
** I'm not clear about what we'd propose.  What is the specific, positive, doable request?
** I explained that I'm not ready to propose an implementation plan and that I want to create the architecture myself, before looking for help.  I have an intuition for how it will all fit together in a very powerful way, with openness and feedback at all levels.  Once I get more detailed clarity about the architecture, then I'll look for ways people can help.  Before there's architecture, I don't know how to give an implementation plan.  Or maybe the plan is this: Conal creates the architecture (possibly with Shelly Farnham's help) and then asks for some implementation help, and then we invite people to contribute their bios etc.
** Sandy suggested that Holly & I do a presentation of the proposal and then ask whether the group wants to move in that direction.  I guess it's just a statement of interest/intention for now.
** We also talked about the intermediate possibility that PSNCC opens up their web-site to folks who aren't certified, and changes the "prerequisites" in the way Sandy wrote up.
** Okay, so next I want to ask Holly for help in creating a presentation for Tuesday evening (core team meeting).  Sandy scheduled 90 minutes to our proposal.  I'd like the presentation to be clear and fairly detailed.  Not as loose (Q&A) as before.
* Oops!  It's a good idea to close all tiddlers being edited before closing the web browser.  Otherwise I could lose some edits.
* Rest day.  Yum.
* Big misgivings about presenting the Emergence proposal to the core team on Tuesday.
* I want to post a note about the Emergence proposal to the [[NVC certification candidates yahoo group|http://groups.yahoo.com/group/nvccertificationcandidates]].
* Oops.  I forgot this morning's certification candidates support call.
* Listening to the [[2006-10-23]] support calls and taking notes:
** [[notes from 2006-10-23 morning certification candidate call]]
** [[notes from 2006-10-23 evening certification candidate call]]
* In reply to a certification candidate, "I also believe that there is great awareness for a need for more assessors to alleviate the bottleneck":+++[response]>
One of the key insights I came away with from reading "Emergence" (by Steven Johnson) is that //any top-down// approach to quality is in direct opposition to growth.  When I consider what will happen under the current certification scheme if we come up with more assessors, here's what I expect:
* More assessors lead to more certified trainers and more trainings of the quality that certification is intended to support.
* Then more people who learn and love NVC and who want to get certified and thus rely more on assessors.
* Again, the assessor supply lags behind the demand, so the bottleneck is back.
Given the effectiveness of NVC, I do believe that exponential growth is possible.  To support quality, we'd need corresponding exponential growth of the assessor pool.  The only way I know to do that is to replace the top-down approach with a bottom-up one, as slashdot did.  Specifically, this proposal recommends removing the distinction between assessors and assessees, empowering and encouraging everyone to assess.
===
* I want to break up the Q&A section of the Emergence proposal into one tiddler per question.  Easier to edit.  More in harmony with the "micro-content" idea.  Simply tag each question as "question" and then use a macro to create the Q&A section.  Maybe there's a way to have them included in how recently edited.
* Very powerful TW extension here: http://tiddlywiki.abego-software.de.
* Worked with Holly on new Emergence presentation for Tuesday night's psncc core team meeting.  Organized ideas in a tiddlywiki, and then Holly moved it into a nice ~PowerPoint presentation.
* Finished preparations for presenting the Emergence proposal to the psncc core team.  Tweaked the powerpoint slides some.  Holly set up a lovely background and simple transitions.  There wasn't really a lot more to do, but still I didn't find myself able to really move my attention to something else.  Nervous energy, I guess.
* Scott Lewis phoned, in response to some questions.  (Scott is facilitating the CNVC support group conference calls.)  About phone conferencing, he uses http://freeaudioconferencing.com and likes the convenience.  They provide a dedicated line (same number each time).  Quieter phone lines than some.  What's in it for the companies? maybe a kickback from long-distance companies.  For recording, he uses audioacrobat.com: easy to do.  give you the link.  $20/month, based on bandwidth. * Voted. Holly & I opted for computerized voting, which we thought would be fun & fast. Turned out to be a bottleneck, as there was only one electronic voting machine, compared to eight or so old-fashioned ones. * Emergence presentation in the evening. I had mixed feelings about doing the presentation, and I have mixed feelings afterward. Still supportive response. Mostly quite positive, and some concerns, which are helpful in seeing some issues. I couldn't really ask for action, as the system isn't real yet. Also, I'm burnt out on all the energy I've been putting in on this project. I want to work on my graphics language projects and make t-shirts. * Catching up on blogs etc. * Called Charlotte & will visit her this morning when Holly gets back from school. * Trying out my new [[Kinesis Maxim split adjustable keyboard|http://www.kinesis-ergo.com/max-spec.htm]]. I want typing to be gentler on my wrists & elbows. From some conversations with my chiropractor, I suspect that my neck & shoulder problems mainly come from my right elbow getting tweaked, which comes from typing. To do: switch the "insert" and "delete" keys, since delete is a lot more useful to me than insert. Now I also understand that a built-in numeric keypad was ergonomically harmful to me. It forced my mouse hand further from the keyboard, which prevented my right elbow from keeping a comfortable bend. Just one look at a photo of a model using the keyboard was enough for me to see how much more comfortable typing could be. 
* Reflecting on last night's Emergence presentation. One thing stands out as quite a surprise. Two people expressed concern about being left out, given that they're (a) not very computer literate, and (b) have misgivings about allowing people to publicly review them. As I said last night, the last thing I want to do is create yet another system that includes some people and excludes others. What surprises me is how easy it is to create another imaginary power base. "Imaginary" in the sense that it's only an idea (imagining). I've noticed other imaginary power bases (like CNVC or PSNCC certification, or being part of an "in" circle of some kid who starts acting like king), but I hadn't thought of myself as wielding that power. For a moment last night, I got a taste. No thanks. * Found blog articles on serving ~MP3 files and another useful tip. I posted a question on the first, about how to script and what parameters are available. ** [[Embed MP3 Files Into Your Website| http://googlesystem.blogspot.com/2006/07/embed-mp3-files-into-your-website.html]] ** [[Vibe Streamer - Create Your MP3 Server| http://googlesystem.blogspot.com/2006/07/vibe-streamer-create-your-mp3-server.html]] ** [[Play And Convert Any Multimedia File| http://googlesystem.blogspot.com/2006/05/play-and-convert-any-multimedia-file.html]] * Put Tuesday night's [[Emergence presentation| http://emergence.awakeningcompassion.com/other/emergence.ppt]] on-line. * I'd like to sync up a voice recording with a powerpoint presentation. Record my voice and stepping within and between slides. Maybe add a photo of myself to make it a bit more personal. Or one photo per slide. Also, look for a free alternative to MS powerpoint. How about something that runs right in a web browser. * Graphics processors are evolving into general stream processors. See [[this article| http://arstechnica.com/news.ars/post/20061108-8182.html]] on the new ~NVidia 8800. 
* Learning about [[Odeo|http://odeo.com]] for mp3 streaming in a web page. Reading the forum, I found [[a post on "audio quoting"|http://forum.odeo.com/index.php?pg=kb.page&id=43]] that looks like what I want. Except, it looks like this method only works on //their// server. Posted +++^[note]I'd like to have a web page with a single odeo player and a variety of separate buttons, each of which plays a different audio quote. The audio quote feature is close to what I want, but I think requires (a) one player per quote, and (b) using odeo's server instead of my own. Is it possible to use javascript to cause an embedded odeo player to start & stop at selected time points, and if so, how? Is there another approach that may do what I want?===. Not sure the post went through. I think I could use the audio quoting technique as is. If so, I'd like to make it very convenient for people, by having a form for them to fill out, with start time, end time, and comments. I'd synthesize the html from there. Posted another note about the audio-quote feature not working at all for me. I don't think it went through either. Oh well. * Corresponded with Roger. He's concerned, as I am, that there not be any veiled demands in the "pathway" doc for people teaching NVC. * Installed ~FireFox 2.0. Massively broken for me, though Holly got along just fine. I've lost tabs, my Bookmarks Toolbar Folder, and I don't know what else. I hope I can get it working soon, as I depend on ~FireFox for most of what I do. * I've been feeling discontent again lately. Missing engagement. I like having one or more projects I'm gung-ho about and in the middle of. What would I like to work on? ** Eros project. Improve the paper and implementation. Get a source release out there and encourage people to help. ** Along these lines, learn to organize an open source project. ** Get my art shown at coffee shops. ** Self-organizing FAQ. Yeah! ** Group annotation of audio. 
* Found blog article [[Annotate online video with Mojiti|http://www.lifehacker.com/software/video/annotate-online-video-with-mojiti-213344.php]]. I've been wanting something like that for annotating workshops etc. * Started <<tag idea>> tiddler on [[self-organizing FAQs]]. * Learned how to have each day's entry automatically tagged with its month. Use {{{<<newJournal 'YYYY-0MM-0DD' {{new Date().formatString("YYYY-0MM")}} 'day'>>}}} in SideBarOptions. The double braces surround javascript code, thanks to InlineJavascriptPlugin. Wow -- what other cool stuff could I do with this ability? * Another [[embedded audio player| http://google.blognewschannel.com/index.php/archives/2006/08/23/googles-mp3-embeddable-player]], thanks to Google and some folks digging around. Oddly, with some files it starts playing right away <html><iframe src="http://mail.google.com/mail/html/audio.swf?audioUrl=http://www.itconversations.com/audio/download/itconversations-1332.mp3" style="border: 1px solid rgb(170, 170, 170); width: 200px; height: 25px;" id="musicPlayer"></iframe></html> and with some others not until the whole mp3 file is loaded <html><iframe src="http://mail.google.com/mail/html/audio.swf?audioUrl=http://slewwho.audioacrobat.com/download/af9c9917-4c0e-0dab-cae1-ba47f4539b82.mp3" style="border: 1px solid rgb(170, 170, 170); width: 200px; height: 25px;" id="musicPlayer"></iframe></html>. Maybe there's a difference in server side support. I know audioacrobat also streams, so maybe I can get another angle. * Looking for focus again. Projects: ** Haskell projects: Eros & Phooey. Implementation & distribution. Use darcs & Daan's extensions. Either on Joseph's server or http://haskell.org. Maybe also learn trac. ** Organic FAQ ** Pictures: mat & frame one for Becky, look for more picture inspirations. * Discovered rounded corners via CSS, e.g., {{{-moz-border-radius: 1em}}}. Used in [[TiddlyWikiTips|http://TiddlyWikiTips.com]]. Using for now in {{{tagged}}} and {{{tagging}}} styles. 
* I invited Mathias K over for a visit. Once he got here and saw my new ergo keyboard, he had some very specific advice about improving my set-up. One is that keyboards should have the near end higher than the far end, so that the wrists are unstrained. We did a few adjustments while he watched me. I like it! Also see this [[Cornell ergo site|http://ergo.human.cornell.edu/ergoguide.html]]. When Holly came home, we also talked about our Emergence proposal and more broadly about what we want to create in the world, via NVC. * Getting darcsweb going. Having some set-up problems: ** As usual, I can't get a cgi to run when it starts with {{{#! /usr/bin/env python}}}. Replaced with {{{#! /usr/local/bin/python}}}. I don't understand why. ** The CSS and ~PNGs aren't used. When I try to look directly at the .css in the browser, I see that it's getting executed. See comment in [[setting up CGIs]]. * Got darcsweb running. See {{{Directory}}} tag incantation in [[setting up CGIs]]. The other trick was to really specify the path to darcs. [[It's very pretty|http://conal.net/darcsweb]]. Next tackle darcs-server. * From Wikipedia on [[Nikos Kazantzakis|http://en.wikipedia.org/wiki/Nikos_Kazantzakis]]: > His epitaph read "I hope for nothing. I fear nothing. I am free." * What does sociocracy say about paramount objections to an //existing// policy? I have some about psncc's policies around who gets to put their picture & bio on the web-site and advertise offerings. My understanding of "paramount objection" is an objection that is so strong that I would withdraw my life energy from the shared undertaking. That's what I was doing before Sandy joined PSNCC. Since then I've been in a wait&see holding pattern, plus some effort at changing the status quo. And now I'm in that same place with CNVC as well. Sent query to John Buck. * In the wee hours of the morning, I started <<tag essay>> on [[NVC certification and violence]]. 
Of course the name would provoke a defensive reaction, so I'll want to change it before sharing. I was irked and couldn't sleep. * Erika J mentioned: when employees are asked what they most want in a job, money is not in the top ten. Instead, things like working with people they like. But when they don't get what they most want, //then// they ask for money. As a compensation. I relate to that in my own life with Holly. I don't care much about cost of living when I'm really content with meaning, connection, contribution, and creative accomplishment. When I'm not getting these things I really want, //then// I get cranky about our cost of living. * Charlotte's birthday. We went out to lunch and hung out & danced & stuff. Then I played with Audrey while Char & Eric had their first counseling appointment. * Attended the small business startup, rather by accident. Had a great time. Did wonders for my self-confidence. * I set up a Google Calendar "Northwest Compassionate Communication". I suspect that we really want two different calendars. One would be visible to everyone and would contain all CC (NVC) workshops in our geographic area. A second one would be of more limited interest, to folks who share CC. I'm especially excited about the first one, since it can be easily updated at any time with new offerings (or canceled ones) and new info about offerings. Also because it is decentralized and so is one easy & concrete step from top-down control to openness. * I've been making some big shifts in my attitude about PSNCC and CNVC. Some progress in letting go of my attachment to them as strategies for inclusion, community, contribution, and support. I'm noticing that my attachments get me into frustration, conflict, and demand. What I want is peaceful allowing these organizations & people to think & act exactly as they do for as long as they do. And for myself to create & co-create communities that do support my exploration and work. 
Some steps: ** Create a "Northwest Compassionate Communication" Google calendar and invite others to post their offerings, rather than waiting to be welcomed to post my offerings on the psncc list. (Though now Sandy is requesting that the psncc list be opened up.) ** Call my Emergence vision a "plan" (or ???) rather than a "proposal", as the latter means to me that I'm requesting approval. As long as it makes sense to me, I'm going to do it with or without psncc or cnvc encouragement. Look for a stronger word than "plan". ** Write essays and post them on a blog or wiki. Invite comments. ** Create a wiki for discussing NVC theory, since I've been dissatisfied with existing mailing lists & wikis. ** Post pointers to my resources on http://nvcwiki.org. Copy some content in there as well, and be bold about it. Don't rely on others' acceptance though. ** Clearly and consistently use language the way I like to use it and want to hear it used. If I want to use "trainer" to mean one who trains, then do it and allow psncc and cnvc folks to get confused or uncomfortable. I could even use "certified" to mean that //someone// has "attest[ed] to as the truth or meeting a standard". For instance, I'm self-certified to teach NVC and ~CMU-certified as a ~PhD computer scientist. To speak more specifically, I'd say certified by whom for what. Be the change I want to see in the world. This clarity of speech is very important to me in shifting from power-over to power-with. I believe that implicitness of //relationship// is one of the language tricks of power-over, and part of the brilliance of Marshall Rosenberg's teaching is to make those relationships explicit. For instance, translating "That book was terrible" to "I'm disappointed with that book". * Note to Roger & Moreah about this shift. +++[Excerpt]> I'm delighted with the changes that are starting to happen or at least being discussed in psncc. 
At the same time, I'm realizing that I want a supportive community whether or not psncc or cnvc can play that role for me. My "Emergence" exploration has clarified for me that the power of organizations like psncc or cnvc is purely by mutual agreement. I think that's why I've become so emotionally reactive to the word "trainer" being co-opted by psncc folks to mean "person whom some of us have approved to train", rather than its common meaning of "one who trains". And cnvc folks do the same but with a different "us". A Marshall-style reframe of the jackal judgment/labeling "You are (or are not) a trainer" might be "I feel comfortable (or uncomfortable) recommending you to people for NVC training". Or it might be any of a variety of observations, feelings, needs, or requests. I'm remembering Marshall's statement [["Never let someone in authority tell you what you are"|http://www.cnvc.org/downlds/20020714.mp3]]. Anyway, with increasing clarity & confidence, I'm brewing intentions & manifestations of effective support and inclusiveness and release of my personal resistance to other people's thinking, speaking, or behavior. Rather than changing current institutions, I want to create alternatives that work better. And stay connected with the people I like in those old institutions, calmly & clearly sharing what I'm up to without any request for approval.=== * Play date with Sandy Fox at our house. Talked a bunch about where we are and where we want to go with PSNCC. Very fun & connecting also. * Made AudioPlayerPlugin with {{{audioPlayer}}} and {{{marshallism}}} macros. Example: <<marshallism 20020714>>. Big test at [[Marshallisms]], but doesn't load well. What I really want is to click on the clip name and have the player materialize. Added {{{audioSlider}}} and {{{mbr}}} macros, but they don't work. Maybe the {{{<iframe>}}}? Experiment and see. Ask for help from TW folks. 
Another problem I suspect is that the player using doesn't really stream the audio from the source, so it all gets downloaded before the player starts up. I'm still not sure about that. * Found [[CivicSpace|http://civicspacelabs.org/civicspace/features]], a Drupal-based community-organizing platform. Looks pretty useful. Supports blogging, forums, polls & surveys, files, photo galleries, social networking, contact management, event listings. Related: [[CiviCRM|http://civicrm.org]]. * Working with darcs-server again. Stuck. Asked Daan for help. * Experimenting with iframe and nestedSlider. Worked okay on my home page as the iframe source, but not with the google audio player. * Tweaked my MainMenu so that it gives me a tag for the current month: {{{<<tag {{new Date().formatString("YYYY-0MM")}}>>}}} (using InlineJavascriptPlugin). * NVC marketing meeting. Very informative. Some I enjoyed learning. Overall, I felt nauseous and disheartened. So much of marketing I hear as manipulation -- skillfully imposing my personal will on other people's. Especially in pressuring people to make quick decisions. * Replaced "proposal" with "plan" in [[Emergence|http://emergence.awakeningcompassion.com]], now that I have enough clarity & confidence to know I'm going to pursue realizing the Emergence vision. * Blog finds: ** [[Locate a mailbox|http://www.payphone-project.com/mailboxes]] ** [[Sync your Google Calendar with your cell phone| http://www.lifehacker.com/software/top/sync-your-google-calendar-with-your-cell-phone-213886.php]] ** [[Skype 3.0 Beta: Facelift and a Boob Job| http://www.gizmodo.com/gadgets/software/skype-30-beta-facelift-and-a-boob-job-213286.php]] ** [[10 Reasons You Should Never Get a Job (Steve Pavlina)| http://www.stevepavlina.com/blog/2006/07/10-reasons-you-should-never-get-a-job/]] * Pat's birthday!! <<smiley :)>> Pat, Becky, Charlotte, Audrey, Holly & I went to Denny's in Ballard. Then presents & hanging out. 
What I'd like to accomplish today: * Progress on using darcs-server * Isolate the problem with the {{{audioSlider}}} macro and send note to TW list. * Design an NVC offering. * Met Brian at Hotwire. He likes Daniel Quinn and saw my copy of //The Story of B//. He recommended two other books: //Sophie's World// and //Art of Passion//. Placed a hold on the former. Couldn't find the latter. * Made a new <<tag project>> category (tag). Added [[Emergence of NVC]], [[Eros]], [[Phooey]], [[TiddlyWiki hacking]]. * Poking around with darcs-client and darcs-server, to determine why darcs push is failing. I want much better diagnostics. There is a verbose mode for darcs-server. Find out how to activate it. A possible problem: my repo permissions are {{{rw-rw-r--}}}, and the darcs-server perl script probably runs as user apache, which would not have write permission. As an experiment, I tried {{{chmod -R u+w Eros}}} in my {{{darcs}}} directory. Same error. Added {{{-v}}} to {{{DARCS_APPLY_HTTP}}}. Aha! Found a +++^[relevant message]{{{Sat Nov 18 19:56:55 2006] [error] [client 24.41.41.187] Can't locate /home/conal/.darcs-server/darcs-server-lib.pl in @INC (@INC contains: /usr/lib/perl5/5.8.0/i386-linux-thread-multi /usr/lib/perl5/5.8.0 /usr/lib/perl5/site_perl/5.8.0/i386-linux-thread-multi /usr/lib/perl5/site_perl/5.8.0 /usr/lib/perl5/site_perl /usr/lib/perl5/vendor_perl/5.8.0/i386-linux-thread-multi /usr/lib/perl5/vendor_perl/5.8.0 /usr/lib/perl5/vendor_perl /usr/lib/perl5/5.8.0/i386-linux-thread-multi /usr/lib/perl5/5.8.0 .) at /home/conal/domains/conal/htdocs/cgi-bin/darcs-server.cgi line 19.}}}=== in the Apache error log. Oh! The apache user +++^[can't read the darcs-server directory]...{{{ [conal@feijoada darcs]$ sudo -u apache ls -l /home/conal/.darcs-server/darcs-server-lib.pl
ls: /home/conal/.darcs-server/darcs-server-lib.pl: Permission denied
}}}===, because I followed Daan's set-up directions, which say to do {{{chmod 700 .darcs-server}}}.  Got further to another error message.  Emailed Daan.

* Church @ CSL
* Lunch with Holly at Tai Ho.
* Hanging out at Hotwire.
* I want to get a men's group going.  See if Ravi is interested and ask him if he knows others who might be.  Who else?  Look into advertising in The Stranger and Seattle Weekly.  Do I want to lead?  I thought I did, but I'd rather get support for myself.  My CA men's group worked great without a facilitator.
* Got this clarity from a conversation with Holly.  Here's why it's worthwhile for me to take care in my speaking when I'm in pain.  What I want is for the other person to attend to my pain.  If my words stimulate pain in the other, however, then I've given them something //more interesting// to focus on than my pain, namely their own.
* Tried out FileDropPlugin.  Looks like wonderful fun, but apparently depends on TW 2.1, and ZiddlyWiki isn't quite there yet.
* +++[Note to TiddlyWiki group about my experience with journaling in TW.]>
This note shares a personal TW practice I've adopted.  Others may enjoy it as well.  Comments & suggestions are very welcome.

For the last 20 years, I've been keeping a journal of my daily activities & thoughts. I started in grad school when I noticed how often I felt discouraged at the end of a work day, not having a sense of accomplishment or clarity about what I've done with my time.  The journaling helped a lot, and I've been keeping it up since.  All that time, my journal has been in ascii, via GNU Emacs.  Since discovering TW recently, I've switched over my journaling from ascii & emacs to TW, and I'm delighted with the change.  When I want to do a lot of writing/editing, I use Chris Klimas's [[Twee emacs mode| http://gimcrackd.com/etc/src/#Twee]].

My newJournal incantation tags each entry with its month and the word "day":
{{{
<<newJournal 'YYYY-0MM-0DD' {{new Date().formatString("YYYY-0MM")}} 'day'>>
}}}
Similarly, each month is tagged "month" and the year, and years are tagged with the word "year".  When I create a new topical tiddler (an idea, how-to, essay, etc), I tag that tiddler with its creation date.  My MainMenu has a tag for the current month.  I intend to use these month & year tiddlers to capture monthly & yearly plans and accomplishments.
{{{
<<tag {{new Date().formatString("YYYY-0MM")}}>>
}}}
Most fun of all, with this tagging in place, I use the {{{siteMap}}} macro to get a structured view on all I've been doing and thinking.

The result is at http://journal.conal.net.

BTW, copy&paste from a rendered tiddler into gmail works great.

With much gratitude to the TW community,

- Conal
===
* I wonder: could it be practical and powerful for tiddlywiki to use javascript directly as its language for macro invocation?  More precisely, JS is the language for //defining// macros, but two different little languages are used for //invoking// macros: one from wiki markup ({{{<<foo ...>>}}}), and one from template html ({{{macro='...'}}}).  I don't know to what degree these invocation languages support composition.
* Futzing with the LAN again.  This time I shut down the router, all of the computers, the network server //and// the cable modem.  I turned on the router, then my computer, and I could see the local network from my machine.  When I plugged the print server back in, my print job came out.  Differences that I'm aware of relative to previous tries: turned off the modem, shut down all computers.  Next time, try the modem specifically.
* Mathias visited.  We had a long talk, and he gave me some great inspirations.
* Tasha pointed me to her response to a [[LiveJournal post|http://community.livejournal.com/weddingplans/9003589.html]] about an engaged couple's conflict over last names.  I wrote +++[this response.]>
Wow -- I'm worried about the three of you after reading your note.  I'm glad you have another year to go before the wedding.  Conflict is inevitable, and how you two handle conflict (or can learn to handle it) says a great deal about how happy your lives can be together.

I'd like to offer another perspective on compromise, namely that it's //poison// to relationships.  And it's never necessary when we get clear about what's really important to us underneath our positions, preferences and demands.  My partner & I work with couples, and we encourage and help them to keep working on understanding and expressing their underlying needs, until they can joyfully let go of their current strategies in favor of ones that meet all of the present needs //fully// (no compromise).  The magic of this approach is that you both end up happy, //and// you're more intimately and compassionately connected than before the conflict arose.

In contrast, "compromise means the resentment gets shared 50/50", leaving a bitter residue that gradually poisons your relationship and self-respect.  Most couples play this game until they divorce or, worse, live out their life sentences of disappointment.

The "50/50" quote is from the book "Don't be Nice, Be Real" by Kelly Bryson. //Please// read that book with your fiance and use the principles.  Also "Nonviolent Communication" (NVC) by Marshall Rosenberg.  (Where "nonviolent" means getting past habits of judgment, blame, demand, diagnosing, etc, to clear & heartful thinking, speaking, and listening, so that mutual support becomes possible and joyful again.)  Better yet, find someone in your area who teaches NVC and/or can provide counseling/mediation using NVC.  Or phone up my parter & me, and we'll help you (with or without your fiance) get oriented, free of charge.  The same offer  goes to anyone else reading this note.  Our phone # is 206.364.5063, and our web site is [[awakeningcompassion.com|http://awakeningcompasion.com]].

If you do seek counseling from your pastor, I hope you're watchful about his/her own orientation and examine whether it's compatible with your own.  Does he/she reinforce the patriarchal values of ownership/domination of women that you'd like to move beyond?

Heartfelt best wishes,  - Conal
===
* Last week I ordered a 120GB Viewsonic Wireless Media Gateway, which is a wireless router with a hard drive and two USB ports for printers or other devices.  It gets us on the internet fine, and the file system shows up, but I haven't gotten the printer interfaces to work.  [[Searched| http://www.google.com/search?q=viewsonic+wireless+media+gateway+mygroup]] and found [[someone with the same problem| http://www.smallnetbuilder.com/index2.php?option=com_content&task=view&id=24719&pop=1&page=0&Itemid=80]].  After several tries, I called ViewSonic customer support (800-688-6688).  Did not find a solution.  Case # VSA-061122-0521.  Escalated (to a supervisor I guess).  I suspect the problem comes down to access permissions.  I'm going to try something else.  Edit {{{/LPRNG/etc/samba/smb.conf}}} on the device to make the group match our workgroup (mshome) rather than "mygroup".  The catch is that I don't have permission to edit that config file.  Working on giving myself permission.  Failed.  I don't think it's really about permissions, since I'm able to write to {{{/media}}} but not {{{/LPRNG}}}, and they show the same permissions.  Maybe {{{/LPRNG}}} is really in ROM.
* Revisiting [[Phooey]], starting with my paper draft.  (See [[Phooey]] tiddler for details.)
* Having another go at making t-shirts.  My last batch bled terribly in the washer.  I forgot about them and let them sit wet all day on the first washing.  There are a lot of variables, and I don't know what's most important.
** print setting (//photo quality glossy film// vs plain paper),
** heat press time, temperature & pressure (45 seconds, medium-hard, at 325 degrees),
** washing (wait two days, dry on gentle for 5 minutes, then wash on gentle, promptly remove and dry on gentle).

* Talking with Holly this morning, it became crystal clear that I want to diversify the support & personal connection in my life.  I created a tremendously broad support base for myself six years ago in preparing to end my marriage, and now I have just Holly.  No wonder I'm lonely and discontent.  As a first step, I'm starting up a men's group.
* Men's group potential members: Mathias, Rob Jennings, Pan Vera, Mitch Albala, Scott Steinhorst, Roger C.  Mathias is already in.  I talked with Rob, and he's in.  Left messages with the rest.
* Reworking my [[Phooey]] paper.  Started over fresh, moving my current {{{~/Phooey}}} directory to {{{~/Phooey/Old}}}.  I'm taking a much more top-down approach than previously.
* Idea: use Phooey to make a simple implementation of the rendering part of tangible values (not the interactive composition part).  Hopefully very simple, as there's a good deal in common.
* Switched my Emacs tex mode to use longlines.  I really dig longlines.

* Imagine presenting [[Eros]] as follows:
** Visualizing functional values
** Combining user-friendliness //and// composability (rather than choosing between them).  Yes -- this angle is probably the most significant, from a computer-science perspective.  That composability could be programmatic, using the Eros algebra.  It can also be gestural.
** Observe the role of //types// in composability.  If the type is something like //IO ()// or //UI () ()//, then we're not going to be able to compose.  A type like //IO a// may let us get info //out// but not in.  Even then, a //complete// UI would have type //IO ()//, which shows that we can no longer compose.
* Men's group progress.  Roger and Scott are very interested.  Scott is free weekday evenings other than Monday (and other than our NVC practice group 1st & 3rd Wednesdays).  I'll talk with Mitch tomorrow.
* [[reply to Shulamit Day on 2006-11-26]]
* Note to TiddlyWiki group: +++[filtering tag lists?]>
I'd like to filter out (hide) some tags generated by the tagChooser macro and under the "All" tab in SideBarTabs.  Specifically, I want to hide my daily (and maybe monthly) journal entries, i.e., tags having corresponding tiddlers bearing the tag "day" (and maybe "month"). Any hints?===
* Men's group: I talked with Ravi at CSL.  He wants in.  Monday & Thursday evenings look good for him.  He'll be leaving for Austria on Dec 13.
* [[Phooey]] paper: do I want the paper to be the implementation?  Tempting, but tilts the paper toward being very detailed.  Keep the paper & implementation separate for starters, and then consider merging them.
* Men's group:
** Teo wants in
** Mitch is interested and had some reservations but wants to give it a go.  He wondered whether it would be ~NVC-centric, and if so, maybe it //and// the practice group would be more NVC than he wants.
* Installed GHC 6.6 and switched my {{{PATH}}} from 6.5 to 6.6.  Oh, now I need to recompile wxHaskell and re-add various packages.  Backed off to 6.5.  Try again later.
* Trying out {{{mkcabal}}} for Phooey.  Investigated, found & fixed a problem, and wrote to Don S.  For now, I'll go with LGPL for the Phooey license.
* Getting new Phooey together
** Depends on the monad transformer library.
*** I haven't known how to properly describe this dependency.  I'd had the local source copy wired into my makefile, which certainly won't do.  Looked and found http://www.cse.ogi.edu/~diatchki/monadLib.  The latest versions have Cabal support.
*** No version of monadLib quite seems to match my ghc/cabal version.  Old ones don't use hierarchical library module names or do inconsistently.  The latest versions (2.0 & 2.0.1) yield {{{Unknown field 'extra-source-files'}}} from my cabal.  I guess I have an old cabal.  For now, I commented out the extra-source-files directive.
*** The standard Cabal incantation worked, though it put the library archive in {{{c:/Program Files}}}, which I don't like.  Instead, use {{{--prefix=c:/Haskell/packages}}}.  +++[Full version:]
{{{
}}}
===
** Similarly, grabbed the arrows package.  My cabal failed on it also.  I can comment out parts, but instead, I'm going for the latest cabal, at {{{http://darcs.haskell.org/packages/cabal}}}.
** Got latest Cabal; unregistered Cabal-1.0; followed build directions in README.  Worked!  Now I have Cabal-1.1.7.
** Returning to arrows, configure failed, wanting base>=2.0.  Now I'm really tempted to switch to ghc-6.6, which probably has base-2.0.  First, try getting and building the latest base package via darcs.
** I don't know how to build the {{{base}}} package.  Switching to ghc-6.6 instead.
*** My first attempt failed: {{{Unable to find the 'wx-config' program: wx-config}}}.  I'll re-build wxWindows.  (In retrospect, I could have searched for wx-config and added it to my PATH, as the rest of the error message suggested.  Maybe I lost the path setting when I rebuilt my hard drive last year.)
*** Re-building wxWindows.  This time with {{{--with-opengl}}}, which I missed last time I built.  I do want opengl.
**** Returning to build wxHaskell, this time wx-config was found.  +++[configure.]{{{./configure --with-opengl  --prefix=c:/cygwin/usr/local}}}===  Broke immediately.  I started with a fresh copy of wxHaskell.  Then I got +++[this.]
{{{
ghc -c wx/src/Graphics/UI/WX/Types.hs -o out/wx/imports/Graphics/UI/WX/Types.o -iout/wx/imports -odir out/wx/imports -hidir out/wx/imports  -fvia-C -package-name wx -iout/wx/imports

wx/src/Graphics/UI/WX/Types.hs:94:0:
Something is amiss; requested module  wx:Graphics.UI.WXCore.Types differs from name found in the interface file wxcore:Graphics.UI.WXCore.Types
make: *** [out/wx/imports/Graphics/UI/WX/Types.o] Error 1
}}}
===
* Lots of snow last night!
** Found an [[explanation and solution| http://sequence.complete.org/node/214]] for the build problem I ran into yesterday.  I also saw some messages about patches to make wxHaskell compile under ghc-6.6.  Where?  Oh, of course: {{{http://darcs.haskell.org/wxhaskell}}}!
** Tried using {{{--prefix=c:/Haskell/packages}}}.  C++ compilation couldn't find lots of .h files.  Switched to {{{--prefix=/usr/local}}}, which works fine.
** {{{make install}}} failed, saying {{{out/wxc/ewxw_main.d:1: *** multiple target patterns.  Stop.}}}.  I moved those .d files out of the way (into a new "d" directory) and retried.  Same problem with {{{out/wxc/ewxw/*.d}}}, so I moved them, too.  Succeeded.
** Got instructions {{{Done with wxcore... Now please run make wx and make wx-install}}}.  Did these two steps.
* I want to use haddock for my code.  darcs-got the latest.  It looks to be cabal-ready.  Oops -- haddock requires alex, which requires happy.
* For executables (happy, alex, haddock), use {{{--prefix=c:}}}, which dumps the exes in {{{c:/Haskell/bin}}}.  Added that directory to my PATH.  All built & installed fine.
* Re-built & re-installed {{{monads}}} ({{{monadLib}}}) and {{{arrows}}} packages, configuring with {{{--prefix=c:}}}, for consistency with executables.
* Got Phooey cabal working, all the way through installation (including ghc-pkg)!  Now I can start filling in the code.
* Emergence:
** Mel shared an exchange on the cnvc-trainers discussion group about some pain & ideas around re-certification.
** Sandy suggested it would be an opportune time to share Emergence with them, which I was also thinking.  Mel offered to pass on a note to them
** I want discussion to be inclusive of people, whether or not cnvc has certified them, so I started a new yahoo group: [[emergence-of-nvc| http://groups.yahoo.com/group/emergence-of-nvc]].
** Wrote this note for Mel to pass on to the cnvc-trainers yahoo group.+++
Our local NVC network has been exploring an alternative to certification.  While there are still more details to iron out, there's considerable interest in our group at this point.  I expect to prototype this alternative locally and then offer it globally.

The full, evolving, collaborative plan is at http://emergence.awakeningcompassion.com.  To stimulate your curiosity, here is an excerpt:
<<<
Our goal is to support learning, living, and sharing of NVC consciousness.  We see tremendous potential for growth, and so we want to create a system that nurtures that growth.  The main parts of the plan are as follows.
* Create a web-based matching system that helps connect people sharing and seeking NVC.
* Focus on supporting rather than evaluating.
* Create and nurture a learning community.
* Enable rapid growth of NVC consciousness by replacing the top-down assessment bottleneck with self-supporting community feedback.
* Support the continuing evolution of the NVC process.
<<<
This plan is evolving and I welcome feedback, so as to continue improving it to meet needs around sharing NVC as effectively and joyfully as possible.  To facilitate discussion, I've created a new discussion group: http://groups.yahoo.com/group/emergence-of-nvc, and I encourage you to participate.
===
** Tomorrow, make an entry on http://www.nvcwiki.com.

* [>img[http://conal.net/Pan/Gallery/transparent/power-xorgon.png]]Emergence:
** Mel sent out my note to the cnvc-trainers group.  One new member of emergence-of-nvc.
** I want my email address on the new group to be conal@awakeningcompassion.com, so I set that up.
** Added my [[emergence logo / power design| http://conal.net/Pan/Gallery/xorgonRings/with-center/g.png]] (at right) to the [[emergence-of-nvc group page| http://groups.yahoo.com/group/emergence-of-nvc]].
** Added an Emergence link on the nvcwiki [["other wikis" page| http://en.nvcwiki.com/index.php/Many_NVC_wikis]].
** Wrote a group welcome message.+++>
Welcome to the emergence-of-nvc discussion group.

I set up this group to support inclusive, transparent, and creative conversation around new, more effective directions in supporting the learning, living, and sharing of NVC consciousness.  The name "Emergence" comes from study of self-organizing systems, as found in living organisms and collections of organisms.  Emergent systems use primarily bottom-up mechanisms rather than top-down control.  They are thus deeply in harmony with NVC's "Power With" orientation, and they use growth to support quality rather than placing quality and growth in opposition.

As a starting point for this conversation, I encourage you to read and respond to the notebook at http://emergence.awakeningcompassion.com.  In case you're unable to view that notebook, you can get a PDF version (perhaps without the latest changes) at http://emergence.awakeningcompassion.com/other/emergence.pdf.

Warm regards,
Conal Elliott
conal@awakeningcompassion.com
http://conal.net
http://awakeningcompassion.com
===
** Jeff Brown sent a reply to Mel's forward of my note pointing to Emergence.  I think he sent it to the cnvc-trainers group.  I responded, asking to shift the conversation to my new discussion group.+++>
Thanks for the inquiries.  In discussing them, I want conversation to include folks with and without cnvc certification, so I'd like to redirect the conversation to an open forum.  Would you be willing to re-send your note to emergence-of-nvc@yahoogroups.com, and suggest to the cnvc-trainers group that they go there for discussion?
===
** From the Onion, here is [[another perspective| http://www.theonion.com/content/node/55807]] on integrity vs evolution.  I added a link in the Emergence site [[here| http://emergence.awakeningcompassion.com/#%5B%5Bevolution%20of%20the%20NVC%20process%5D%5D]].  I wonder whether it will amuse or annoy more.  Sandy loved the Onion article and sent a copy to psncc folks.
* Still frozen & snowy & lovely outside today.  Been having a peaceful & pleasant day.  Holly's school is closed today, and we just finished watching Contact, a favorite movie for each of us.
* Got some help & camaraderie on the #haskell irc chat room.  Solved the problem I had yesterday with running Phooey.  Added {{{/usr/local/lib}}} to {{{PATH}}} (not {{{LD_LIBRARY_PATH}}}).  Then found that my output strings are getting truncated to their first character.  lispy on #haskell and later Daan Leijen on IM suggested that it's a Unicode issue.  Tomorrow, ask for help on the wxhaskell-users list.
* Chatted with Daan on IM.  He helped me with darcs, darcs-server, and wxhaskell problems.  I got the latest darcs-server patches.  Tomorrow, grab a fresh darcs from http://glozer.net/darcs.  See if my "plink" warning message goes away.  Recompile darcs-client, and put the latest darcs-server onto Joseph's server.  Do something about giving darcs-server permission to write to my repository.  Give {{{darcs push}}} another go.
* More Emergence: I saw that Jeff Brown posted his note to the cnvc-trainers & nvccertificationcandidates groups, I replied to the latter group, asking to move the conversation to emergence-of-nvc, so the conversation can include everyone.  Now the group has seven members.
* Last week, I finally ordered the [[head-mounted display| http://www.3dvisor.com]] I've been wanting for many months.  It was supposed to arrive today, and it got on the FedEx truck in Bothell just a few miles away, but I guess they had snow/ice trouble.  The tracking site says "Delay beyond our control" and back to the Bothell FedEx facility.  Curiously, the shipment originated in Issaquah, also nearby.
* By the way, last night Holly & I heard Ze Frank (very funny [[video blogger| http://www.zefrank.com/theshow]]) pronounce Issaquah accenting the second syllable and pronouncing it "saw", rather than the first and "suh" as we do here.  I wonder if Ze's pronunciation is closer to the native people's.
* Rebuilt wxWidgets with {{{--enable-unicode}}}.  Same problem.  Rebuilt with {{{--disable-unicode}}}.  Ditto.  Responded to Eric Kow & wxhaskell-users group giving more info and asking for more help.
* Emergence:
** Connecting correspondence with Shulamit Day, who lives in Ottawa.  We shared some rants about certification and some dreams.
** Now 11 members in the yahoo group.  Two messages.  Messages from Susan Skye (CNVC Certification Coordinator for North and South America) and Jim Manske (member of CNVC Global Coordinating Committee and husband of CNVC acting executive director) that went to other groups and will hopefully end up on the new group.  I like & respect Susan, and I want a clear & caring connection with her in this discussion.
** Got help from Mel and from Jeff Brown in rerouting messages from cnvc-trainers to emergence-of-nvc.  I appreciate Jeff's spirit and carefulness in communicating.
** Added an emergence-of-nvc link on the nvcwiki [["Mailing lists" page| http://en.nvcwiki.com/index.php/Mailing_lists]].
** Improved the group blurb.
** Wrote [[reply on the "bottleneck" on 2006-11-30]].

* I learned that Scott Fahlman [[invented the smiley| http://research.microsoft.com/~mbj/Smiley/Joke_Thread.html]] {{{:-)}}} in a CMU online conversation, one year before I got to CMU.
* More activity on [[emergence-of-nvc|http://groups.yahoo.com/group/emergence-of-nvc]].  Now 18 members.
* Unpacked my new [[head-mounted display|http://www.3dvisor.com]].  Installed the software.  Works with my Acer notebook and Samsung Q1, though not 3D stereo, since I have ATI rather than ~NVidia graphics.  The [[3dvisor forum| http://www.3dvisor.com/forum]] contains [[a thread| http://www.3dvisor.com/forum/viewtopic.php?t=96]] about a virtual desktop app that gives one a very large virtual desktop and lets one look around with head motion.  [[Found| http://kolbysoft.googlepages.com/surroundsight]] on the author's [[web site| http://kolbysoft.googlepages.com]].
* Problem with the [[Emergence site|http://emergence.awakeningcompassion.com]].  The name server stopped resolving my subdomain name.  A week or so ago, I switched registrars for awakeningcompassion.com from yahoo to 1and1.com.  When I did that, I stayed with 1and1's name servers rather than using {{{NS1.TWISTED4LIFE.COM}}} and {{{NS2.DIGITALSPECTRUM.BIZ}}} as Joseph helped me select for conal.net.  My guess is that the 1and1 name servers won't default subdomains to the domain's IP, while the two from Joseph do.  I added the subdomain through the 1and1 interface, and the problem was fixed.  As an experiment, I also switched name servers for nvc-connection.net, to see if it will pass through unknown subdomains.  After the nameserver change propagates, try http://foodle.nvc-connection.net.  If I get "bad gateway", I'll know it worked and that I want to change over other nameservers as well.
* Look back at my email about changing registrar from yahoo.  I wonder if I need to cancel, or yahoo will charge me for another year.
* My nvc-connection.net subdomains are not yet getting forwarded, after the name server switch.  I started writing Joseph a note, when I realized that the changes might not have propagated to dns servers yet.  Try again later today and tomorrow.  Learn about how all this stuff works.
* Fixed my TW printing style sheets.  I had various sizes tuned to compensate for a funny non-standard setting in my browser.  Turned off header printing in my journal.
* Since installing the 3dvisor, my Q1 has had very unreliable internet access.  It connects to the local network fine, but not the internet.  Other computers on the local network aren't having this problem, even my notebook, where I've also installed the 3dvisor software.  Puzzled.
** As an experiment, I removed the 3dvisor software from the Q1's startup list and rebooted.  Now I can view some sites and not others.
** Hm.  I don't think I have any kind of antivirus on the Q1.  Install Spybot and Clamwin.
** Coincidentally, ~IE7 installed, including some anti-malware software.  After installing & rebooting, my internet access problems seem to have vanished.
* I think I understand how to make huge windows in [[FullSize|http://www.fanix.com/fullscreen.html]].  Use the "free sizing" feature (toggle on from control-right menu in a window), together with resizing.  Keep moving the window down & right and resizing up & left.  Tedious but workable.
* Playing with the 3dvisor and reading ~PDFs by moving my head around.  I can make the window taller than the screen (by using ~FullScreen), but then Acrobat Reader won't fill in content more than one screen height.  Look for another PDF reader.
* Heard back from Eric Kow that he uses wxWidgets-2.6.3 with ghc-6.6.  Got wxWidgets-2.6.3.
** {{{../configure --with-msw --disable-shared --with-opengl --enable-unicode}}}
** ...
* CSL this morning.  Holly sang bax.
* Lunch at Charlotte's restaurant.  Then visited with Charlotte during her long afternoon break.
* Apparently GIMP 2.2 can make ico files, handy as tiny web site logos.
* Trying [[Foxit Reader 2.0| http://www.foxitsoftware.com/pdf/rd_intro.php]] as an alternative to Adobe Acrobat Reader.  See if it will let me make really huge windows, for reading with the 3dvisor.  No.  It behaves the same way: the window can be very tall, but the content won't cover more than a screen height's portion of the window.  Odd!
* Emergence:
** Responded to Susan Skye.+++>Hi Susan,

I appreciate your inquiry, as I value clarity highly.  Since I want an open conversation, the cnvc-trainers list isn't the best fit for me to have the discussion.  Secondly, it would be inconvenient for me, since I'm not allowed access to the cnvc-trainers e-group.  Rather than my forwarding messages to you and others, you might instead visit the new group (http://groups.yahoo.com/group/emergence-of-nvc) and read messages without having to join.  If you want to post a reply or have the convenience of messages coming to you, it's quite easy to join (open to all, with no approval process).  Also, temporarily, at least, Dominic has offered to cross-post to cnvc-trainers.  Do these options work for you?

Warmly,  - Conal===
** Replied to Shulamit
* Phooey:
** Rebuilding wxHaskell to work with wxWidgets-2.6.3.  Loads of error messages.  Report & query to wxHaskell-users list.
* 3dvisor:
** Registered on the 3dvisor forum site.  Posted [[a note| http://www.3dvisor.com/forum/viewtopic.php?t=656]] asking about the status of ~SurroundSight Virtual Desktop and whether I could get the source code to hack on.
** Thinking about how to read documents with the visor.  I can visually scan some amount of text with ~SurroundSight, but I still want a way to move to the next page.  The visor picks up [[yaw, pitch and roll| http://www.egms.de/figures/journals/cto/2005-4/cto000011.f2.png]] ([[see also| http://liftoff.msfc.nasa.gov/academy/rocket_sci/shuttle/attitude/pyr.html]]).  Use yaw & pitch for 2D scanning and roll for page turning.  Tilting the right ear to the right shoulder steps to the next page, while tilting the left ear to the left shoulder steps to the previous page.  Let the user tune the angle threshold that signifies a page turn.
* [[TiddlyWiki for the rest of us| http://www.giffmex.org/twfortherestofus.html]] ("You will never find an easier entry-level TiddlyWiki instruction manual than this!")
* Idea: [[safe & efficient internet computing]]
* 3dvisor: continued [[discussion| http://www.3dvisor.com/forum/viewtopic.php?t=656]] of ~SurroundSight.
* Phooey:
** Working with haddock.  Got help from Simon Marlow in response to yesterday's note.  In ghc-6.6's {{{package.conf}}}, replace {{{$topdir\\html}}} with {{{c:\\ghc\\ghc-6.6\\doc\\html}}}. Fix expected in ghc-6.6.1. +++^[Haddock'd]{{{runhaskell Setup.hs haddock}}}=== packages: monads, arrows ** Stopped resisting the Windows defaults for cabal & haddock. Now do {{{runhaskell Setup.hs configure}}} with //no arguments//. ** Oh, rats. That doesn't work out either. When haddock'ing Phooey, I get +++[this.] {{{ Warning: cannot use package arrows-0.2: HTML directory "C:\\Program Files\\Common Files\\arrows-0.2\\doc\\html" does not exist. }}} === I sent in a query to the haskell list. Here is the +++[winning incantation.] {{{ runhaskell Setup.hs configure --datadir=c:/Haskell/packages --prefix=c:/Haskell/packages }}} === Then {{{runhaskell Setup.hs}}} with {{{build}}}, {{{haddock}}}, and {{{install}}}. ** Haddock doesn't recognize the latest ghc syntax, including infix type constructors and (worse) arrow notation. For now, I've switched the infix and am not haddock'ing the examples (with arrow notation). This problem is well-known. The fix will apparently be to "use the GHC API which obviously can parse all the syntactic extensions." To do: * Reply to Shulamit's note on the nvc-emergence group. * wxWidgets & wxHaskell: ** Rebuilding wxWidgets-2.6.3, this time dropping the {{{--with-msw}}}. (Inspired from a post [[here| http://blog.gmane.org/gmane.comp.lib.wxwindows.general/day=20041207]].) New config command: {{{../configure --disable-shared --with-opengl --enable-unicode}}} . ** Noticed in configure output: {{{checking for --enable-prologio... no}}}. The first error message when compiling wxHaskell says {{{"wxUSE_PROLOGIO must be defined"}}}. If after recompiling wxWidgets and then wxHaskell, I still get the {{{wxUSE_PROLOGIO}}} error, then rebuild wxWidgets with {{{--enable-prologio}}}. 
** Sure enough, I still get the {{{wxUSE_PROLOGIO}}} error, Rebuilding wxWidgets with {{{--enable-prologio}}}, I get +++[this error.]{{{configure: error: wxExpr and old-style resources are now available in contrib only}}}=== I don't know what else to try. ** Oh -- I think the wxHaskell build is finding the old wxWidgets (2.4). The error message references {{{c:/cygwin/usr/local/include/wx/}}}, which is older than {{{wx-2.6}}} in the same directory. I renamed wx to wx-2.4 to avoid this accident and recompiled. Now wxHaskell is compiling. Sent wxHaskell-users +++[message.]>The compilation problem below seems to have been that my wxHaskell compile was picking up my old wxWidgets-2.4 include files rather than the new 2.6.3 ones. Looking in /usr/local/include, I saw wx/ and wx-2.6. Although the wxHaskell compilations explicitly ref'd include/wx-2.6, the error messages below ref'd include/wx, I fixed the problem by renaming include/wx to include/wx-2.4 and recompiling wxHaskell.=== Also mentioned the "multiple target patterns" problem and the {{{-fPIC}}} compiler warnings. ** The working configurations: *** wxWidgets-2.6.3: {{{../configure --disable-shared --with-opengl --enable-unicode}}} *** wxHaskell-0.10.1: {{{./configure --prefix=/usr/local --with-opengl}}} ** Success! Now I have wxHaskell-0.10.1 and wxWidgets-2.6.3 working together with unicode, and no more truncated strings. ** One more thing to remember in building wxHaskell: build the documentation. I don't know how to get the docs registered so they're picked up by haddock for libraries that //use// wxHaskell (like Phooey). As a hack-around, I edited the wxcore and wx entries in ghc's {{{package.conf}}} +++[like so.] {{{ haddockInterfaces = ["c:/Haskell/wxhaskell/out/doc/wxhaskell.haddock"], haddockHTMLs = ["c:/Haskell/wxhaskell/out/doc"] }}} === ** To file wxHaskell bug reports, see the [[project page| http://sourceforge.net/projects/wxhaskell]]. * Phooey: ** Asked Simon Marlow for a darcs.haskell.org account. 
Sent him my public {{{id_dsa.pub}}}. I'll place my projects there, since people already look there and it's presumably set up well or could be for things like darcs-server. ** All of my Phooey modules are haddock-commented. ** My dynamically-bounded slider example (ui2) crashes. Odd! * Read [[blog entry| http://therning.org/magnus/archives/228]] with a program for recursive directory listing. I'd like it to be simpler and have the IO bits more separated. Playing in {{{~/Misc/ListFiles.hs}}}. For one thing, I'd like to separate out the tree structure from the linearization & path concatenation. Use rose trees {{{Data.Tree}}}. * Men's group: Matthias called to check in. I told him that I'm starting the men's group we talked about. He's excited about it. Include him ({{{m8p.kai@gmail.com}}}) in my email about it. Called and left message with Pan. * Phooey: ** Submitted wxHaskell [[bug report| http://sourceforge.net/tracker/index.php?func=detail&aid=1610984&group_id=73133&atid=536845]] (process dies on second {{{start}}}). Also sent to wxHaskell-users with query. Heard from Jeremy O'Donoghue that this is a known problem with wxWidgets-2.6.3.+++> There //is// a workaround for this: use wxWidgets 2.4.2 or earlier, which have a different allocation/deallocation strategy. This is why we continue to support wxWidgets 2.4.2.=== This may be a serious problem for me. ** Pondering library organization. I made {{{Graphics.UI.Phooey}}} re-export most of {{{Graphics.UI.Phooey.UI}}}, while hiding the representation. ** Renamed module UIM to ~MonadicUI and UI to ~ArrowUI. ** Lots of haddock documentation, including a longish friendly intro in Phooey.hs. ** Lots of flailing around with darcs & ssh. My files & directories are getting created so that they're unreadable or unsearchable by "group" and "other". That messes up use on the server. Tomorrow check into UMASK. * Can I implement Eros on Phooey? Maybe just the visualization part of Eros. 
* Discovery: Emacs ispell-minor-mode is causing the funny behavior I get at the start of text buffers when I hit space or enter after non-alphabetic characters. Both keys are bound to ispell-minor-check. To do: look for a newer ispell. * MSA members get 12% [[discounts on Dell computers| http://whatcounts.com/t?r=1374&c=731025&l=34283&ctl=15381FE:D17492C51C91D73DED0EA8EC1A0E58CFA04380EFC0D89E1E]] * Replied to Louise Taylor, a student in the UK who asked about some of my Pan imagery. * ssh issues ** Heard from Simon Marlow that the "Patch bundle failed hash" bug is known, and "The one known workaround is to use the SSH that comes with MSYS and set CYGWIN=binmode." I also sent Daan a note asking about his experience, since he uses darcs with servers, from Windows. ** Downloaded [[msys-1.0.10|http://www.mingw.org/download.shtml]]. Doesn't contain an ssh. Asked Simon Marlow. ** Trying [[PuTTY|http://www.chiark.greenend.org.uk/~sgtatham/putty]]. * Playing with adding glow to my power design. | [img[http://conal.net/Pan/Gallery/transparent/m-glow-inv-150.png]] | [img[http://conal.net/Pan/Gallery/transparent/m-glow-150.png]] | [img[http://conal.net/Pan/Gallery/transparent/z1.png]] | * Response to Henry Wai on what I hope the yahoo group will achieve.+++> Hi Henry. I like your question, as thinking about it has given me more clarity. I've amended the group description. 
I hope to achieve: * well-grounded clarity about needs met and needs unmet by cnvc certification, to make it more likely that needs currently falling through the cracks are more likely to get addressed; * seed collaboration on creating new means of meeting those needs; * confirmation and/or redirection for my [[Emergence plan| http://emergence.awakeningcompassion.com]], so that it will support people's needs for learning, growth, community, and contribution as well as possible; * learn whether top-down quality control strategies like certification do indeed thwart growth, as I and some others have suggested and still others have questioned; and * encouragement & community for myself and others sharing nvc. As for how the group might achieve these goals: * Through discussion of support quite different from what cnvc has been offering, people can get a new sense of possibilities and choice. * As people share their personal goals behind their choice to pursue cnvc certification, especially in the light of other possible strategies, the underlying needs can become clearer and more room for separating those needs from the current certification strategy. * Discuss the "bottleneck" issue for a reality check. === * Correspondence with Patrick Mulder, who asked me about running Fran. Yesterday I updated the Fran page to say that Fran no longer runs and to invite collaboration on the topic. Patrick has been using Pan# (Yale C# variation of Pan). * Re-reading [[How to write a Haskell program| http://haskell.org/haskellwiki/How_to_write_a_Haskell_program]]. It's easy to set up darcs to run ~QuickCheck tests at each commit. Apparently, the commit only happens if the tests all succeed. Neat! * Phone chat with Jake. I enjoyed catching up. * To make a release, do e.g., {{{darcs dist -d phooey-0.0}}}, which makes {{{phooey-0.0.tar.gz}}}. Also tag via {{{darcs tag 0.0}}}. * Try again using wxWidgets 2.4.2 instead of 2.6.3, so I can use wxHaskell (and therefore Phooey) with ghci. 
See [[2006-12-07]]. ** Went back to {{{c:/wxWindows-2.4.2/mybuild/}}} and did {{{make install}}}. Note: created {{{c:/cygwin/usr/local/include/wx/}}}, as mentioned on [[2006-12-06]]. ** Recompiling wxHaskell: *** Do I have to recompile wxHaskell? Try first without. Crash. ** Configure then {{{make}}}, without {{{make clean}}}. Lots of linker errors. *** {{{make clean}}} then {{{make}}}. I had the problem with {{{popupwin.h}}} mentioned on the [[wxHaskell build page| http://wxhaskell.sourceforge.net/building.html]]. Copied as recommended. Started over with clean build. *** Now I'm back to the problem of truncating my strings to one character. *** Reconfigure and recompile wxWidgets 2.4.2, using config line from [[2006-12-06]]. Same problem. Also rebuilding wxHaskell. Long wait .... Truncation. *** Start all over with building wxWidgets 2.4.2, starting fresher. Not just a "make clean", but remove the contents of mybuild to make sure nothing old was hanging around. Configured, built, and installed. Everything works! Full, untruncated strings. Multiple starts in ghci. And I prefer the visual style of sliders in 2.4.2 over 2.6.3. * Return to the Phooey paper? Two main contributions: ** The interface design ** Systematic development of the implementation by deconstructing imperative GUI programs. * A while back, I changed my emacs ~LaTeX mode to use longlines minor mode. Works fine in a pure ~LaTeX file, but not in a literate Haskell script with parts in Haskell mode and parts in ~LaTeX mode (mixed via mmm-mode). If I save the file and then load it, I see hard line breaks. I verified that the lines get saved long. For now, I commented out the {{{longlines-mode}}} line in my {{{tex-mode-hook-function}}}, with the intention of doing it manually when I edit. Or a magic comment that gets executed by emacs. Found +++[a working incantation.] 
{{{ %% Local Variables: %% eval:(longlines-mode) %% End: }}} === Maybe I'd better just not use longlines-mode here, since it may hinder collaboration. Hm. * Don Stewart posted a [[blog entry| http://cgi.cse.unsw.edu.au/~dons/blog/2006/12/11#release-a-library-today]] that "describes the entire process of writing, packaging and releasing a new Haskell library." It's mainly a shell transcript. * In a reply to Roger, I wrote a note with [[comments on the word "trainer"]]. * Simon M pointed me to msys-DTK, which has the msys version of ssh he uses. See [[2006-12-08]]. I installed it and uninstalled cygwin's openssh. Now, finally, I can {{{darcs put}}} and {{{darcs push}}} to {{{darcs.haskell.org}}}. Works great as long as I don't use {{{darcs amend-record}}}. * Finished my Phooey-0.0 release! Sent out [[phooey-0.0 release note]]. * Caught up on http://planet.haskell.org reading. * Phooey: ** Appeared in [[today's Haskell Weekly News| http://sequence.complete.org/hwn/20061212]]. Good timing! ** Added to the [[GUI libraries| http://haskell.org/haskellwiki/?title=Libraries_and_tools/GUI_libraries]] page of haskell.org. ** Got some queries on the haskell mailing list: *** Antony Courtney asked how Phooey is like or unlike Yampa. (I wonder why Yampa and not Fruit. Maybe modesty.) Also, asked about "dynamic collections", an old Fruit issue. *** Brian Hulley asked what I mean by inversion of logical dependencies in imperative UI programs. Also about implementation efficiency and evaluation strategy. He suggested that laziness may make efficiency easy. I think he's right. 
He also suggested an +++[example for the paper.]> If you're still thinking of examples for your paper it would also be really great to see how you'd create a widget that displays the result of another process (eg a window that displays the output as ghc compiles a program) or some other example of how to use the IO monad inside a widget for those unfamiliar with combining arrows with monads.=== ** Working on the Phooey paper. * Stumbled on [[TimeToMeet| http://www.timetomeet.info]], a free, web-based meeting scheduler, which I've been wanting for setting up the men's group. I couldn't remember where I'd seen such a thing, when I looked yesterday. I found it quite synchronistically today, in tracing back links to Phooey as reported by statcounter. * Yoiks! I got blog-spammed. One of my blog entries got five spam comments. I turned on "word verification", which asks a commenter to read and enter a word. Alternatively, I could turn on "comment moderation", which notifies me of comments and won't post them until I give the okay. Moderation might be more convenient for commenters, but I'd still have to deal with spam. In the process, I upgraded my blog to the new [[Blogger beta| http://beta.blogger.com]]. * Phooey: ** Doaitse Swierstra pointed out that my Phooey docs have some references to other library docs on my local machine. I don't know how to fix the problem, so I sent a query to libraries@haskell.org. ** Read and replied to a note from Steve Schafer. He suggested that push-vs-pull is more the crux of the "dependency inversion" issue. I like the clarity of this dialog and want to improve the paper accordingly. ** After looking at the apache log analysis (awstat), I updated the statcounter to offset my late creation. I had 892 Phooey visits yesterday, having sent my announcement at 11:15pm on the previous night. Jeepers! * More on web stats: ** I want to get all of my server log info analyzed and graphed. 
It takes a long time, with the monthly logs (and running in Perl). Set up regular, incremental analysis to run nightly. ** Made some helpful aliases for manual analysis, like {{{awupdate phooey}}} and {{{aw}}} (for {{{awupdate conal}}}). ** I don't know how to specify the log file from my awstats config file. Log files have names like {{{access_log.2006-11-18}}}. They roll over every 30 days. For now, just edit {{{/etc/awstats/awstats.conal.conf}}} after each roll-over. There has to be a better way. ** Got web-based dynamic invocation going. Visit http://awstats.conal.net to see results of the last analysis. ** Enabled web-based //update//. See the "update now" button on the [[stats page| http://awstats.conal.net]]. ** I made {{{awstats.phooey.conf}}} that greps through my log file for "phooey". It has to grep through the whole thing, so it's pretty slow. ** Found [[Analog| http://www.analog.cx]], billed as "The most popular logfile analyser in the world", and reportedly darn fast (implemented in C -- "56 million logfile lines in 35 minutes on a 266MHz chip"). I don't see incremental analysis. * Web statistics: Cranking through old logs. I hope I'm not impacting Joseph's server noticeably. It doesn't work reliably when I process the log files in //reverse// chrono order, so I'm going forward. * Thoughts on [[Eros| http://conal.net/papers/Eros]]: ** It keeps logic and UI //together and separable//. ** Maybe release it in stages: *** Visualization of pure values. *** Add programmatic composition, via the Eros algebra. *** Add gestural composition. ** I want to layer Eros neatly on top of Phooey. Probably doable without gestural composition. With? * There was a major wind and rain storm last night. The power went out and came back on only this evening. The news said a million people in western Washington were without power. 
This morning, Holly & I found that there was power in Shoreline (adjacent to Lake Forest Park), so we had lunch at Sunni's and then went to Hotwire to use our computers. We've been making Christmas presents, and I'm excited about what we're coming up with. * This morning, I started thinking about how to recreate Eros on top of Phooey. I'm still noodling it through, but I expect it will come out very elegantly. * My files are all read-protected from others. Some are read-write-protected from me. Some have funny owners. +++[Fixed.] {{{ chmod -R u+rwX . chmod -R og+r . chown -R Conal . chgrp -R None . }}} The capital X means "execute only if the file is a directory or already has execute permission".=== Unfortunately, new files that I create through Windows still come out as readable only by me. I don't know why. Reading up on [[permissions & security for ~WinXP| http://support.microsoft.com/kb/308418]].+++> ''Windows XP Home Edition'' Start in safe mode, and then log in as Administrator or as Administrative User. The Security tab is available for files or folders on NTFS volumes. ''Notes:'' * The Everyone group does not include the Anonymous Logon permission. * You can set permissions only on drives that are formatted to use NTFS. * To change permissions, you must be the owner or have the owner's permissions to change permissions. * Groups or users that are granted Full Control permissions for a folder may delete files and subfolders in that folder, regardless of the permissions that protect the files and subfolders. * If the check boxes in the ''Permissions for //user or group//'' box are unavailable, or if the ''Remove'' button is unavailable, the file or folder has inherited permissions from the parent folder. For more information about how inheritance affects files and folders, see Windows Help. * By default, when you add a new user or group, the user or group has permissions for Read and Execute, List Folder Contents, and Read. 
=== * Web statistics: ** Made an index page at http://awstats.conal.net, which redirects to http://conal.net/stats.html. ** I want to set up murraycreek.net. The format got changed from "common" to "combined" on March 2, 2005. I'll have to split the access log into two pieces and process them separately. Process the old logs, too. And clc press. ** Here's what I like about awstats, compared with statcounter: *** it doesn't require anything on the individual web pages; *** it catches absolutely all domain activity; *** it keeps bazillions of detailed records (for free), not just the last hundred; *** it shows old web traffic stats as well as new; and *** it doesn't slow down visitors. ** See [[2006-12-17]] for down side. * Spent most of the day making Christmas presents. Since this journal is online, I won't say just yet what the presents are. * Started working on my new implementation of Eros on Phooey. I'm calling the new library "TV" for "Tangible Values" (Sean Seefried's suggested replacement for "concrete values"). I'm happy with this layering approach, as a way to make Eros easier to work with and understand. More exposed layers also give me more points of contact with other people. * We got California Christmas presents finished and mailed off. * TV (Tangible Values): ** Implemented the conversion from //Output a// to //UI a ()//. Very simple, which speaks well of my chosen abstractions. ** Noodling out this conversion led me to realize that my //UI// arrow can be used in a style that mixes UI & functionality or keeps them separate. I can always take the separate style (//ui :: UI a ()//) and mix them (//pure (const a) >>> ui :: UI () ()//). ** My first few simple examples work great. ** Formatting problem: ** Added some [[darcs tips]]. * Thoughts on Tangible Values, etc: ** See thoughts on [[separating IO from logic]]. ** The TV approach uses ~UIs as a means of visualizing pure (functional) values. How far can we take this idea? 
Can we view //all// ~UIs as visualizations of pure values separable from the UI specification? Sounds fun to pursue. Given a program with a UI, ask //what is the type// of the value being visualized? ** I parameterized my //Output// and //TV// type constructors over the underlying arrow (e.g., //UI//). Make another example as well for //Kleisli// arrows. * Idea: implement readFile and writeFile in terms of a primitive that uses an efficient packed representation, such as byte-strings. Layer conversion to & from standard (inefficient) strings to implement the current readFile & writeFile. Have fusion do the rest. * TV: ** Added support for Kleisli arrows and IO in particular. +++[Examples:] {{{ type KIO = Kleisli IO interactO :: Output KIO (String -> String) interactO = oLambda (kIn getContents) (kOut putStr) io1 :: TV KIO String io1 = tv (kOut putStrLn) "Hello World!" io2 :: TV KIO (String -> String) io2 = tv interactO reverse testO :: Output KIO (String -> String) testO = oLambda (kIn (readFile "test.txt")) (kOut putStr) onLines f = unlines.f.lines onWords f = unwords.f.words perLine f = onLines (map f) perWord f = onWords (map f) -- :: TV KIO (String -> String) io3 = tv testO (onLines reverse) io4 = tv testO (onWords reverse) io5 = tv testO (perLine (onWords reverse)) io3' = tv testO (perLine reverse) io4' = tv testO (perWord reverse) io5' = tv testO (perLine (perWord reverse)) }}} === ** Think about new name, since the project is more general than "tangible values". * Operations that combine input & output don't seem to fit very well into the //TV// style. For instance, //readFile// can have a file name determined dynamically. Similarly for sliders with dynamic bounds. * Added text input to Phooey. Bumped version number to 0.1. Remember to record and push and "make webdoc" before I announce TV. * Web server log file analysis: ** Project idea: fancy & efficient [[Haskell-based web server log analyzer]]. 
** For friendlier web stats, use [[logresolve| http://httpd.apache.org/docs/2.0/programs/logresolve.html]] to do reverse DNS lookup. It does its own caching.+++>logresolve is a post-processing program to resolve IP-addresses in Apache's access logfiles. To minimize impact on your nameserver, logresolve has its very own internal hash-table cache. This means that each IP number will only be looked up the first time it is found in the log file.=== Tested on my 2006-12-18 log file (recently started, only 98k). Canceled after a long wait. Tried the first thousand lines, which took a good while. I think this version of logresolve (found on Joseph's server) does only //transient// caching, while version 2.0 does //persistent// caching. * Read article [[Crossing borders: JavaScript's language features| http://www-128.ibm.com/developerworks/java/library/j-cb12196/?ca=dgr-lnxw01Javascript-Respect]]. Focuses on higher-order functions and ~JavaScript's prototype-based object model. * Helping Sandy redirect the new web site. * Wrote up some thoughts on releasing [[Pajama]] as an open source software project. * TV: ** Thinking about how to define Input as a functor and Output as a co-functor, so I can conveniently fmap & cofmap. ''Problem'': suppose I have a pair-valued output, and then I fmap a function over the pair output. The resulting output may have a non-pair type. I've been thinking that fmap would have //no visible effect// (on the UI), which means that the user would still expect to be able to apply a function to a //part// of the pair-value output. Denotationally, however, the pair is gone and generally unrecoverable. This problem comes not just from pairs, and would happen in any fmap, since the type and the appearance are out of sync. ''Idea'': render an fmap with a single output handle and suppress all output handles in the inner output. Perhaps add some kind of decoration as well. In terms of composition, the fmap is treated as a primitive. 
** Now generalize functor/co-functor to //arrow// and fmap/cofmap to right & left composition with an arrow (possibly but not necessarily //pure//). ** Examples: *** Convert string input or output to value input or output via //fmap read// or //cofmap show//. *** Reading a dynamically named file. Get the name with a name input, and then compose with //Kleisli readFile// to get a contents input. *** Dynamically bounded sliders. Get the bounds with an input, and compose with the dynamic slider UI (arrow) to get a value input. (More realistically, the dynamic bounds would be wired into a single compact & friendly widget.) ** The dynamically bounded slider example reminds me of dependent types. ** Combining arrows, like UI & KIO, for file reading in a GUI app. * Learn about XUL, which may be useful with Pajama. ** [[Wikipedia entry|http://en.wikipedia.org/wiki/XUL]]: "XUL (pronounced zool ([zu:l])), the XML User Interface Language, is an XML user interface markup language developed by the Mozilla project." ** [[Firefox add-ons|https://addons.mozilla.org/search.php?cat=2]] ** [[Google search|http://www.google.com/search?q=xul]] yields many references. * Also learn about [[Twisted| http://twistedmatrix.com]]. "Twisted is a networking engine written in Python, supporting numerous protocols. It contains a web server, numerous chat clients, chat servers, mail servers, and more." * Trying out Joseph's http://kyte.tv. See [[kyte notes]]. Sent in feedback. Made a channel and a show. See the [[functional graphics channel]]. * Holly's birthday! * Wrote up some thoughts on an [[community NVC blog]]. Sent a reply to Roger's note. * Web server stuff: ** Joseph gave me the go-ahead to turn on reverse DNS lookup for my domains. He doesn't think it'll make much server impact. It was easy: just add {{{HostnameLookups on}}} in the virtual host section of the apache config. I'm running logresolve over some of my old log files to get names and will then rerun awstats. 
** Got murraycreek.net and clcpress.com awstats going. In each case, I split up the log file in two, as it started with common format and then went to combined. Processed part separately. I also turned on name lookup for future accesses. ** Refactored my config files. Now //much// easier to manage. ** Cleaned up emergence logs. Moved out logs for journal and gtd redirections. Gee -- server logs are not very informative for a ziddlywiki. The contents get downloaded all at once, so I can't tell what is actually viewed. With microcontent, I'd especially like more info. * I want to change how log files are kept. Log rotation and awstats collection don't work well together, since the newest log file has an unpredictable name. Oh -- maybe it is predictable. What about listing them all, sorting by date, and taking the most recent. The following line ought to do it: {{{ls -t access_log.* | head -1}}}. * Got an inquiry comment on my blog about Eros. I want to get it running again. Found that hs-plugins isn't compiling with my ghc-6.6. Sent dons a note. * Great TV progress. Added arrow-composition constructors to Input and Output.+++ \begin{code} iCompose :: Input arr a -> arr a b -> Input arr b oCompose :: arr a b -> Output arr b -> Output arr a \end{code} === Made "pure" versions for fmap and cofmap.+++ \begin{code} instance Arrow arr => Functor (Input arr) where fmap f input = input `iCompose` pure f instance Arrow arr => Cofunctor (Output arr) where cofmap f input = pure f `oCompose` input \end{code} === Very elegant examples. Next, edit [[separating IO from logic]] to be consistent. * TiddlyWiki: ** Discovered that I can copy & paste from the html rendering of a TiddlyWiki into the html editing view of a blogger post, with formatting mostly intact. Cool! ** CSS tweaking. Changed margins for {{{.viewer pre}}} in StyleSheet, so I won't have to indent my code. Also tweaked the button margins & padding. ** Hmm. The previous two points don't work well together. 
If I leave TW CSS to do my indentation and paste into a blogger post, I'll lose the indentation. Oh! Unless I tweak my blogger formatting. Yes. But then there's gmail, and I don't know how to tweak my own CSS. Maybe by stashing away some bits and pasting them in also. ** When I put a code display into a bullet item, it doesn't get indented, since it's specified as its own paragraph. Trick: wrap with {{{++++}}} and {{{===}}}, which makes a "slider" that's initially open.++++ {{{ like such }}} === As a nice side-benefit, the reader can collapse the code display. ** Added [[syntax highlighting for Haskell|SyntaxifyPlugin: Haskell]]. * Cancelled my Yahoo registration plan for awakeningcompassion.com. I'd transferred to 1&1 last month. * Reading Simon PJ's "[[Beautiful Concurrency| http://haskell.org/haskellwiki/Talk:SantaClausProblem]]" article. That link is a "wiki talk page". What a great idea! I want a feedback page for everything I write, starting with drafts. Got inspired with some ideas on a [[feedback system for writing]]. * Edited [[syntax rules for Haskell]] tiddler to try out a new style of writing using "nested sliders" (from NestedSlidersPlugin). * In some context in Firefox, the //home// key switches to the first tab, and //control-home// moves the current tab to the first position. What was that context? * Ran across [[Sapir-Whorf and Programming Languages| http://www.cs.utexas.edu/~danb/courses/fa05/sapir-whorf/wiki]] -- a TW about a [[democratic college| http://demtexonline.com]] course. ("... to provide students with the opportunity to engage in democratic, egalitarian classes. Our classes are student designed and run.") * TV: ** Maybe I could define some ~TVs that work for both IO and GUI. Define a class of arrow types that provide some general utilities.+++ {{haskell{ promptString :: String -> arr () String displayString :: arr String () }}} === Then define "show" versions in terms of these ones and {{{Input}}} & {{{Output}}} versions of all four. 
** Works great. Now most of my TV examples run both terminal-style and GUI. ** Lovely type factoring for these multi-use examples.+++ {{haskell{ -- | For operations over all 'CommonInsOuts' arrows. type Common f a = forall arr. CommonInsOuts arr => f arr a -- | Inputs that work over all 'CommonInsOuts' arrows. type CInput a = Common Input a -- | Outputs that work over all 'CommonInsOuts' arrows. type COutput a = Common Output a }}}=== * Developed a spiral clock picture.+++ Make a row of white disks.+++ {{demoImg{ [img[http://conal.net/images/clocks/spiral-how/linear-disks.png]]}}}=== Add numbers.+++ {{demoImg{ [img[http://conal.net/images/clocks/spiral-how/linear-numbered-disks.png]]}}}=== Repeat horizontally and carefully arrange in spiral, so that a complete set of twelve disks makes it exactly around.+++ {{demoImg{ [img[http://conal.net/images/clocks/spiral-how/spiral-numbered-disks.png]]}}}=== Place over a background of some sort.+++ {{demoImg{ [img[http://conal.net/images/clocks/spiral-how/spiral-lichen.jpg]]}}}=== Crop with a disk.+++ {{demoImg{ [img[http://conal.net/images/clocks/spiral-how/spiral-lichen-cropped.png]]}}}=== Fade the numbers.+++ {{demoImg{ [img[http://conal.net/images/clocks/spiral-how/spiral-lichen-cropped-transp.png]]}}}=== === * TV: ** Made prompts orthogonal to inputs & output.+++ {{haskell{ class Arrow arr => CommonInsOuts arr where -- | Provide prompt label prompt :: String -> arr a b -> arr a b ... }}} === ** Next, make Input and Output versions. Hmm. Wrapping a prompt around //any// Input & Output is not currently possible, and I think it requires extending those ~GADTs. +++^In the process, consider merging my {{{CommonInsOuts}}} and {{{LayoutArr}}} classes. I'm reluctant to merge, since {{{CommonInsOuts}}} has some methods that might not be definable for some arrows.=== What are the implications of Input & Output versions of prompt? Benefits:++++ * For UI, I can box up separate portions, like my "shopping list" example. 
Maybe even different labelings of a repeated group of widgets. * For IO, I can give directions for a function. === Drawbacks:++++ * Adding a constructor requires me to figure out how to implement the Eros composition combinators. For instance, suppose I label a lambda and then compose it with another lambda. Or I label a pair and apply a function to part of it. I suppose I could eliminate the label. Or propagate it to the remaining pieces. * //others?// === Did it. Looking great. ** Made an Output version of interact.+++ {{haskell{ interactOut :: COutput (String -> String) interactOut = oLambda stringIn stringOut }}} Well, not quite equivalent, since it uses getLine instead of getContents.=== ** Added a read/show version of interactOut. So lovely.+++ {{haskell{ interactRSOut :: (Read a, Show b) => COutput (a -> b) interactRSOut = cofmap (wrapF show read) interactOut }}} === * Got help from #haskell (dcoutts) so I can haddock TV. I just had to add "extensions: CPP" to tv.cabal, so that Cabal would preprocess my sources with {{{-D__HADDOCK__}}} before passing them to Haddock. * Finally got on track with module comments for Haddock. It's very cool, as some of the header comments are interpreted and placed into the Haddock page header (portability, stability & maintainer). * Found emacs cabal-mode. Fixed a bug and improved the templates. Set to auto-insert. * Blogs:++++ * [[the Haskell metatutorial| http://koweycode.blogspot.com/2006/12/haskell-metatutorial.html]] by Eric Kow. The article inspired me to apply my [[self-organizing FAQs]] idea to learning Haskell. The community is technical and playful, so I'd get great feedback. === * Men's group:++++ * Replied to Roger's inquiry. I got some self-empathy in the process.+++> Hi Roger, Here's the deal. Once I started moving on getting a men's group together, I got a tremendous surge of creative energy, which pulled me out of a deep slump. Since then, I've been going gangbusters on my technical projects. 
And I've let men's group organizing slide. I'm loving my productivity, and at the same time I have a background uneasiness about ending up back in my slump without the support of a men's group. And besides, I remember enjoying the connections I've had in past groups. So, for my own well-being, I do want to get back on it. I guess my recent inner resistance has been a fear that I'll get mired in details of scheduling, and have my attention pulled away from my creative work long enough that I'll lose my focus & inspiration. Perhaps I can give myself some reassurance that I'll keep placing a high personal priority on my projects while taking some more steps about getting a time for a first get-together. Or (gasp) I might even ask for help. Warmly, - Conal === * Now I want to define a "next action" (GTD). It would be something that lets me get a meeting scheduled. I don't know whether to go low-tech with an email conversation about times, or high-tech with http://timetomeet.info. Here's an action: make an experimental meeting request with Holly. Did it. Now waiting for her reply. I'm tempted to think about what to do next after that. Try letting it go and entrusting the decision to my more informed future self. === * TV etc:++++ * How do I want to divvy up the release? Particularly, what kind of composition do I want to support and how? I want some in there, to show off the power of the ideas. In particular, show how composition strips off matching input & output. * I'm uncomfortable with the size of Eros's ~ArrowX type class. I stuffed a bunch of polymorphic functions in there, because (a) I don't know how to deal with polymorphism, and (b) I wanted the Input/Output structures to be transformed in a specific way. My worry is that I've omitted some functions and at the same time have missed solving an important problem. (Cf the 0, 1, &infin; principle). I could release a subset initially, and extend later if I still don't know a better way. 
Or I could release as is, point out what I don't like, and ask for help. I like this latter choice, as it's a departure for me and invites collaboration. === * Blogs:++++ * [[The Haskell metatutorial| http://koweycode.blogspot.com/2006/12/haskell-metatutorial.html]] by Eric Kow. The article inspired me to apply my [[self-organizing FAQs]] idea to learning Haskell. The community is technical and playful, so I'd get great feedback. === * Men's group:++++ * Replied to Roger's inquiry. I got some self-empathy in the process.+++> Hi Roger, Here's the deal. Once I started moving on getting a men's group together, I got a tremendous surge of creative energy, which pulled me out of a deep slump. Since then, I've been going gangbusters on my technical projects. And I've let men's group organizing slide. I'm loving my productivity, and at the same time I have a background uneasiness about ending up back in my slump without the support of a men's group. And besides, I remember enjoying the connections I've had in past groups. So, for my own well-being, I do want to get back on it. I guess my recent inner resistance has been a fear that I'll get mired in details of scheduling, and have my attention pulled away from my creative work long enough that I'll lose my focus & inspiration. Perhaps I can give myself some reassurance that I'll keep placing a high personal priority on my projects while taking some more steps about getting a time for a first get-together. Or (gasp) I might even ask for help. Warmly, - Conal === * Now I want to define a "next action" (GTD). It would be something that lets me get a meeting scheduled. I don't know whether to go low-tech with an email conversation about times, or high-tech with http://timetomeet.info. Here's an action: make an experimental meeting request with Holly. Did it. Now waiting for her reply. I'm tempted to think about what to do next after that. Try letting it go and entrusting the decision to my more informed future self. 
=== * TV etc:++++ * How do I want to divvy up the release? Particularly, what kind of composition do I want to support and how? I want some in there, to show off the power of the ideas. In particular, show how composition strips off matching input & output. * I'm uncomfortable with the size of Eros's ~ArrowX type class. I stuffed a bunch of polymorphic functions in there, because (a) I don't know how to deal with polymorphism, and (b) I wanted the Input/Output structures to be transformed in a specific way. My worry is that I've omitted some functions and at the same time have missed solving an important problem. (Cf the 0, 1, &infin; principle). I could release a subset initially, and extend later if I still don't know a better way. Or I could release as is, point out what I don't like, and ask for help. I like this latter choice, as it's a departure for me and invites collaboration. * Define the Eros operators on the new Input & Output constructors.++++ * Title. Suppose I transform part of a titled pair. I could drop the title, or maybe propagate it with change. For instance, for a pair titled "foo", an unchanged first component could get the title "first of foo". (Or for a lambda, "argument of foo" or "result of foo".) What about the transformed part? I guess just drop it. * Compose. Examples:++++ * Turn a string output into a number pair output, using {{{cofmap show}}}. Apply fstA. I don't think any algorithm could figure out what to do. Harder example: turn an image output into a number-pair output by rendering the pair as a disk whose center is at the given coordinate pair. Then apply fstA. * String->String version of grading program. Suppose I have titles for input and output and for the whole function. If I {{{cofmap (wrapF unparse parse)}}}, I won't even have an outer lambda. Now compose with another function. Yow. === These problems are not new to {{{OCompose}}}. They're already present with {{{OPrim}}}. 
My Eros solution was to have an {{{OEmpty}}} operator and use it when destructuring pairs or functions.+++ {{{ asOPair :: Output (a,b) -> (Output a, Output b) asOPair (OPair a b) = (a,b) asOPair p = (OEmpty,OEmpty) }}} === I wonder if instead I could rely on some kind of default output. I have a default output type class in Eros, but I don't think I can rely on having a type instance. === === * TiddlyWiki:++++ * I want to copy & paste from literate Haskell code into a tiddler. Gave a first shot at [[literate Haskell plugin]]. Got stuck and asked for help on the TW group. I also realized that I haven't gotten a reply to my last request because I forgot to send from my gmail account. I created another google account as conal@conal.net and joined the TW group, choosing no email delivery. * Got an answer from Bob M. For the loading dependency problem, just pick a name that comes after the name of the syntax plugin. So I renamed mine [[SyntaxifyPlugin: Haskell]]. * Bob also pointed me to [[an example| http://bob.mcelrath.org/tiddlyjsmath.html]] that would help with my [[literate Haskell plugin]]. This example renders ~LaTeX via Javascript, which is awesome. Gives me ideas. For instance, run my literate Haskell code through [[lhs2TeX| http://www.informatik.uni-bonn.de/~loeh/lhs2tex]] to make it look mathy. * Going further, organize a whole literate Haskell program as a TW.++++ * One tiddler per top-level definition. * Scan the code and automatically generate links for names that match tiddlers. * Use a custom field for the type. * Seems a shame to state the name twice. Could get inconsistent. At the least, have a consistency check. Better, generate the name automatically. * What about multiple modules? Perhaps look to Haskell to give a good answer for TW, rather than vice versa. That is, equate ~TWs with modules and have an explicit export & import. We could import all kinds of stuff between ~TWs, including Javascript code and CSS. 
=== === * I didn't get much sleep last night, so I've been pretty out of it today. * Wrote a [[reply to Jeremy Ruston about dependencies]]. * TiddlyWiki:++++ * Flailed about with my [[literate Haskell plugin]]. Asked for [[help| http://groups-beta.google.com/group/TiddlyWiki/browse_frm/thread/12da5bf596bcc312]] again. Bradley Meck pointed me to his NestedFormatterMacro, which is a great fit. Added a specialized function to his code to make things really simple.+++ //{{{ config.formatters.push( config.macros.nestedFormatter.substAndWikify( "haskellCode", "^\\\\begin{code}\n", "^\\\\end{code}\n", function (s) { return "{{haskell{\n"+s+"}}}"; } )); //}}} === * Idea: have WikiNames render as links //only// if the tiddler exists. That tweak would save me a lot of twiddling but still let me conveniently refer to system tiddlers. === * TV:++++ * Release the Eros algebra and encourage its use independently from UI and gestural composition. Put in the hierarchical libraries, but where? And what to call it (rather than my current "~ArrowX")? * Include only the methods needed to support the composition algebra. I can subclass to add more. Besides the Arrow methods, include result, funF, funS, funR, idA, flipA, curryA, uncurryA. The last three are used to define inpF, inpS, inpFirst and inpSecond. If I throw in swapA and dupA, then I get some nice defaults that don't use arr. And fstA, sndA enable a default for swapA. Only lAssocA and rAssocA remain. Note that Arrow uses swapA and dupA.+++ {{haskell{ class Arrow a where ... second :: a b c -> a (d,b) (d,c) second f = arr swap >>> first f >>> arr swap where swap ~(x,y) = (y,x) ... (&&&) :: a b c -> a b c' -> a b (c,c') f &&& g = arr (\b -> (b,b)) >>> f *** g }}} === I recommend similar definitions for ~ArrowX instances.+++ {{haskell{ second f = swapA >>> first f >>> swapA f &&& g = dupA >>> f *** g }}} === * What about pure/arr? They're not required for the composition scheme, and they prevent code generation. 
For ~OFun, I use ~OEmpty, i.e., invisible. Keep discouraging its use. It's fundamental to desugaring of arrow notation. * When I revive my Eros code, I'll want Haddock docs, but Haddock doesn't understand infix type operators. Rather than rewriting my type signatures, have a whack at tweaking Haddock. Hopefully soon Haddock will integrate more with GHC, and then it will understand all GHC syntax. * Did it. Haddock now handles infix type operators.+++ {{haskell{ foodle :: Arrow (~>) => a~>b -> b~>c -> a~>c foodle = undefined doodle :: Arrow arr => a arr b doodle = undefined }}} === I submitted my changes.+++ {{{$ darcs send
Patch bundle will be sent to: Simon Marlow <simonmar@microsoft.com>

Sat Dec 30 17:53:05 Pacific Standard Time 2006  Conal Elliott <conal@conal.net>
* infix type exprs
Handle types like "a ~> b" and "a arr b".  I especially like this
notation for arrow types.
Shall I send this patch? (1/1) [ynWvpxqadjk], or ? for help:y
Successfully sent patch bundle to: Simon Marlow <simonmar@microsoft.com>.
}}}
===   This darcs patch is my first on someone else's code!
* Pondering what to rename my ~ArrowX class.  What's its purpose?++++
* Enable point-free value construction.
===
===
* TiddlyWiki:++++
*  Inspired by the layout of NestedFormatterMacro, I revised the visual style of [[SyntaxifyPlugin: Haskell]] and [[literate Haskell plugin]].  Pretty.  Changed "About" to "Description" for grammatical consistency.
*  Read through [[Regular Expressions for client-side JavaScript -- a free online quick reference| http://www.visibone.com/regular-expressions]].  Wow -- powerful regexps.
*  Started listing LoadDependencies and UseDependencies in my plugins.
*  Made a [[smart quotes]] plugin.  Shared on the TiddlyWiki group.
I end up ~TwiddlePrefixing a lot of CamelCase words that I'm not intending as WikiLinks.   I think what I'd like is to render such words as links only if the tiddler exists.  That tweak would save me a lot of twiddling but still let me conveniently refer to system tiddlers.  Does anyone have such a hack or any ideas on how to do it?
===
Got back a pointer.  Made my own modification, via [[conditional WikiLink formatter]] and [[startup]].  Then Eric Shulman told me about DisableWikiLinksPlugin, which is friendlier.  Note his use of {{{<option ... >}}}.  Remember that for my customization.
===
* TV & Eros:++++
*  Divide up the libraries as follows:++++
*   Phooey as is;
*   Eros's ~ArrowX and supporting;
*   TV, including ~ArrowX instance and examples of use;
*   Rest of Eros.
===
*  To sort out:++++
*   New name for ~ArrowX and place in hierarchy;
*   TV syntactic composition examples (including "pipe");
*   Gestural composition.
===
*  Purpose & nature of the Eros algebra: non-syntactic composition of transformations (editors) on typed values, ~GUIs, etc.  "Deep function application" (deep functions and deep application).
*  Name ideas: ~DeepApply, ~DeepTransform, Arrow.Deep.  I like this last one as the name of the module and class.  What to call what's now "Function" (conversion to & from functions)?
===
Type the text for 'New Tiddler'
Type the text for '2006-12'
* Arrow.Deep:++++
*  Working through the implementation.
*  Came up with {{{arr}}}-free defaults for left- and right-association.+++
\begin{code}
lAssocA :: (a,(b,c)) ~> ((a,b),c)
lAssocA = (idA***fstA) &&& (sndA>>>sndA)
-- arr (\ (a,(b,c)) -> ((a,b),c))

rAssocA :: ((a,b),c) ~> (a,(b,c))
rAssocA = (fstA>>>fstA) &&& (sndA *** idA)
-- arr (\ ((a,b),c) -> (a,(b,c)))
\end{code}
===  The benefit is that I can (I think) use these defaults where {{{arr}}} is unavailable.
*  Removed from Deep the method defaults that use arr, so as to improve compile-time warnings.
*  Changed tupler type constructors (Pair1, Pair2) to be newtype wrappers around tuples.  Aesthetically, I like using exactly the same representation as tuples, and the code came out a little simpler.
*  Init'd a darcs repo.  Updated [[darcs tips]].
===
* TiddlyWiki:++++
*  Fun TW project: randomized logos.  Make a bunch of logo images for my journal.  Make a macro that randomly returns one argument out of many.  Use it in SiteLogo.
*  Do: extract my code from NestedFormatterMacro, making it be its own plugin.
*  Created [[RewritePlugin]] and [[RewritePlugin: Haskell]] to make my code prettier.  But oops -- I don't yet know how to run it inside my Haskell environment.  Idea: make a second formatters list, filtering for the ones that contain a special field.  Temporarily replace the main formatters before calling subWikify, and then restore.  Or maybe go ahead and wikify the contents.  What could happen?
*  Found that my journal doesn't work under Internet Explorer.  Bummer.  Asked for help.
===
* Other:++++
*  Revisit my [[power design| http://conal.net/Pan/Gallery/xorgonRings/g.png]].  Use three rings instead of six.
===
*  Converting TV to use DeepArrow.  Copied and updated OFun from Eros.
*  Chatted with metaperl about arrows.
*  Noticed that there are two monad transformer libraries.  I used Iavor's "monadLib" for Phooey, but now I see "mtl", which may be better integrated with the core libraries.  Urk.  I'd like simplicity and uniformity.  I grabbed, compiled & installed mtl and started converting Phooey to use it.  I'll have to change a lot, so I stopped for now and sent in a note to the Haskell libraries list asking about the two choices.  ''Problem'': Iavor's Monad.Id module does not export the Id constructor.  I fixed it and recompiled.  But now TV depends on an unreleased monadLib.
*  How do I want to use composition in TV?  One simple example is composition (piping).  Build it out of the DeepArrow operators, as an example.  Did it.  Works.
*  How do I want to handle titles and composition?  Added some thoughts to [[experiments with Output destructuring]], about titles.
===
* Web statistics:++++
*  I had a five-day gap in December's [[stats| http://awstats.conal.net/-conal]].  Fixed it.
*  I'd like a different approach to log rotation.  The current one keeps changing the name of the current log file (every 30 days in my set-up), which means that the awstats config files have to get changed regularly.  I'd rather have the old log file get renamed.  The last bit would have to get processed by awstats and the server restarted (so it writes into the new log file instead of the old renamed one, since the writing tracks file descriptor).
===
*  More [[experiments with Output destructuring]].  Eliminated the automatic title transformations.
*  Shae E (shapr) reminded me on #haskell that he is working (for pay) on [[HAppS -- Haskell Application Server| http://happs.org]].  I have a hunch that my TV/Eros work is applicable.  HAppS applies a monadic (and non-IO) discipline with write-ahead loggin to assure ACID (atomicity, consistency, isolation, durability).  It "lets your app be simply a set of functions with types like:" {{{a -> MACID b}}} (from the [[tutorial| http://happs.org/HAppS/doc/tutorial.html]]).  Wrapping up these functions as a Kleisli arrow, I can drop it into TV.	What benefits would arise?  Remember that the arrows are used as //interfacing// around functional values.  To explore the possibilities, implement my [[self-organizing FAQs]] idea in Haskell on HAppS.
*  Started a project tiddler on [[TV]].
===
* TiddlyWiki:++++
*  Updated twee.el (TiddlyWiki editing mode for emacs) to automate insertion of sublists with nested sliders.  Now it's super easy to keep my delimiters balanced and my indentation as I want.  Sent to Chris Klimas.
===
* Misc:++++
*  I realized why I'd been having trouble with some of my email filter rules, in which I join many sender patterns with "OR".  The problem was operator precedence.  "OR" binds more tightly than juxtaposition.  Solution: quote any pattern that contains spaces.  I also had to quote patterns with asterisks.
===
* TiddlyWiki:++++
*  Twee:++++
*   Played with [[tweebox | http://gimcrackd.com/etc/src/#Twee]] (.tw -> .html) & [[untwee| http://gimcrackd.com/etc/src/#%5B%5BConvert%20an%20existing%20TiddlyWiki%20to%20Twee%20format%20online%5D%5D]] (.html -> .tw).  Did a simple test and also my whole journal.  Issues:++++
*    One blank line gets added between tiddlers on every tweebox/untwee round trip.
*    One syntax error in SyntaxifyPlugin.
===
*   Wrote [[note to Chris Klimas about twee (2007-01-04)]]
*   Messed with twee-mode some more.+++
*    Set page-delimiter to match tiddler titles.  Not exactly what I want, but close.  I'd love to have a command like mark-page that grabs the tiddler I'm editing (without name), so I can cut & paste.
*    Figured out that longlines-mode interfered with new-lines in {{{insert}}} calls.  Made a macro {{{twee-no-longlines}}}.
===
===
===
* Misc++++
*  Futzed with spam filtering rules for conal.net and awakeningcompassion.net.  Came up with better coverage.
*  Lots of talking with Holly about our plans for Awakening Compassion, etc.
*  Brief chat with Sean Seefried.  Showed him my journal.  He hadn't seen TiddlyWiki before.
===
*  TV:++++
*   Started [[TV description]].
===
*   My patch got messed up.  Copied off the changed source files and grabbed a fresh repo.
*   I noticed a bug I think I introduced in HsSyn: "f (&&&) g".  Backed out my hsNameStr change for now.  Really fixed it.
*   Got my configurations of alex, happy, & haddock consistent with library packages.  Now everything gets configured as in [[cabal tips]].
*   Deactivated my html entities tweak to HaddockHtml.  It's a slippery slope and not entirely consistent.
*   Sent patch to Simon Marlow.  My previous {{{darcs send}}} attempt didn't get through, so this time, I used {{{darcs send -o FILE}}}.
===
===
*  I like the term "composable interfaces" to describe TV.  Does the term "tangible values" (and the short form "TVs") still apply?  Maybe: "tangible" in that they can be "touched" (interfaced with) from outside.  Try on this broader understanding for a while.
*  Added boolean input & output to Phooey & TV.
*  Added default visualizations (type class {{{DefaultOut}}}).
*  Idea: arrange for lazy string evaluation in UIs.  Rather than send the whole string over to wxWidgets, just send over the visible portion.  When more becomes visible, redo the conversion.  For efficiency, extend in chunks of //n// characters.
*  I want source code links.  Haddock has some new flags, and [[hscolour| http://www.cs.york.ac.uk/fp/darcs/hscolour]] can generate anchors for each definition, in addition to doing the coloring.  I darcs-got, compiled, and installed hscolour (so easy now, with [[Cabal| http://haskell.org/cabal]].)
===
* Misc++++
*  Switched from Trillian back to Gaim (2.00beta5).  I couldn't figure out how to get Trillian to alert me when someone spoke my nickname.  Gaim does it fine.
*  Found [[GlovePIE| http://carl.kenner.googlepages.com/glovepie]], a "Glove Programmable Input Emulator".  Works with eMagin z800 and Wiimote.  More info on the [[P5 Glove| http://scratchpad.wikia.com/wiki/P5_Glove]] (and [[on Wikipedia| http://en.wikipedia.org/wiki/Wired_glove]]).  They're pretty cheap ($40-50 on ebay) and may work well with eMagin HMD. I joined the [[yahoo group| http://tech.groups.yahoo.com/group/p5glove]]. I emailed an inquiry to the address on the [[product page| http://www.alliancedistributors.com/Alliance_Brand/Products.php]]. Heard back "Minimum of ten". For sale and videos [[here| http://www.vrealities.com/P5.html]]. === * TV++++ * I have hscolour working, to color my syntax and generate anchors. The trick now is to get my sources pre-processed with {{{-D__HADDOCK__}}}, to eliminate the bits Haddock doesn't grok. Cabal can do that, but it doesn't pass the {{{--source-module}}} and {{{--source-entity}}} flags that I need for definition anchoring. Sent note to libraries@haskell.org. Duncan Coutts suggested {{{--haddock-args}}}. I implemented it. Seems to be working, but oops. === * I borked my cygwin installation in the process. Recovering.+++ * Uninstalled msys and "msys developer tool kit". I only installed them to get {{{darcs push}}} working with its ssh, and now it seems that the standard recommendation is to use the ssh and windows installation notes on the darcs site. * Rebooted. Still borked. (Symptom: "grep" doesn't catch anything. Provocation: I did {{{rm -r /usr}}} in a shell, forgetting that {{{/usr}}} would be interpreted as {{{c:/cygwin/usr}}}.) * Cygwin setup didn't find & fix the problems, so I said to revert all cygwin packages to the previous version, and then again to the current version. Rebooting .... * Now missing cat & date. Oh -- there is a "reinstall" option in cygwin setup. Doing a global reinstall. Gee. I wonder if it's installing even packages I hadn't previously installed. Hm. That'd be a lot. 
In retrospect, I could have done a global reinstall from my //local// files. Rebooting .... * Now bash starts up with "regtool: command not found". What's calling regtool? * Hmm. I did another setup, this time being sure to say from the internet. Maybe last time was local. Rebooting .... * Still complains of missing programs, but the messages say ": command not found", so I don't know what's going on. Idea: punt cygwin and just use msys. * The instructions on [[building GHC for Windows| http://hackage.haskell.org/trac/ghc/wiki/Building/Windows]] describe another way to get just the right cygwin packages. * I uninstalled MinGW also. I'm confused about whether it's needed, in addition to msys. Remember this message.+++> When you install MinGW I suggest you install it to C:/mingw (replace C: with the drive of your choice). Then create an /etc/fstab file with a line that has a value similar to: {{{C:/mingw /mingw}}}.=== This too.+++> MinGW-1.1 has a version of make.exe within its bin/ directory. Please be sure to rename this file to mingw32-make.exe once you've echo installed MinGW-1.1 because it's very deficient in function.=== * Hmm. I'm missing a lot of good stuff. Bag msys & mingw and go for a straight cygwin, from scratch. * Oh -- I missed some. Try with just MinGW for starters. Then add msys and then msysDTK. Removed from my PATH: {{{c:\cygwin\usr\local\bin;c:\cygwin\usr\bin}}} and {{{;c:\cygwin\usr\local\lib}}}. Keep {{{c:\msys\bin}}}. * No bash in basic MinGW. Added binutils, mingw-utils, mingw-runtime, gcc-core. Still no bash. * Installing msys & msysDTK. Changed SHELL from {{{c:/cygwin/bin/bash}}} to {{{c:/msys/1.0/bin/sh.exe}}}. //Did not// remove CYGWIN setting of {{{binmode}}}. * Still no bash, so I changed my emacs {{{explicit-shell-file-name}}} to "sh". See what happens. * I'll defintely need my old c:/cygwin/usr/local. Copied it from c:/was-cygwin/usr/local. 
* Re-adding {{{c:\cygwin\usr\local\bin;c:\cygwin\bin;c:\cygwin\usr\local\lib}}} to the end of my PATH. Changed my emacs shell back to "bash". See what happens. * Bash still complains about missing, unnamed programs. Try taking msys & mingw out of the loop. Remove {{{c:\mingw\bin;c:\msys\1.0\bin;c:\msys\1.0\local\bin;}}} from PATH. Rebooting .... Reinstalling cygwin from the internet, using just the defaults. * Discovered that the error messages come from blank lines in my .bashrc and .aliases files. My hunch: bash is confused over dos vs unix end-of-line conventions. In my cygwin setup, I had the "default text file type" set to "dos/text" rather than the recommended (unix/binary). Changed to binary. Removed cygwin and cygwin-packages and reinstalled. Again chose the default set of packages. * Still get the complaints. I ran dos2unix over my .bashrc and .aliases, which helped a lot. Hmm. Isn't that why I want "dos/text" mode? Now I just get one from {{{bash --login}}}, which happens with the desktop short-cut. I don't remember the point of that flag. * Helpful hint from Cygwin FAQ: [[How can I copy and paste into Cygwin console windows?| http://cygwin.com/faq/faq.using.html#faq.using.copy-and-paste]]. Turn on "quick edit mode" in the console properties. Use left to mark & copy (right click), right to paste. === * I read through a long discussion thread on libraries@haskell.org from January 2006, called "Idea to allow people to comment on Haskell docs". I think this thread led to the idea of source code linking and using hscolour. Duncan also added a way to link to a wiki page for user comments. I like that idea a lot. At the very least, set up a wiki page for comments on each package I release. * Cygwin etc+++ * Text mode makes more sense to me. Uninstalling & reinstalling cygwin yet again. Removed the CYGWIN environment variable (was set to "binmode"). 
To do: [[read about this variable| http://www.redhat.com/docs/manuals/gnupro/GNUPro-Toolkit-03r1/gnupro_8.html#SEC58]], and try some values, including "ntsec ntea". (smbntsec may have made our Samba-based network media drive work.) For now, "ntea ntsec tty". Could also try "nobinmode", though I expect that would cause problems, too. Rebooting.... Still the same. * I'm going to leave things as they are now, with my .bashrc and .aliases altered for unix end-of-lines. See if any other symptoms arise. * Next, get a darcs-friendly ssh. Follow [[these directions| http://darcs.net/DarcsWiki/WindowsConfiguration]]. * I can use PuTTY to generate private & public keys. No, I already did that in July. Just use that one. Yep, it works. Added Pageant to my Windows startup group. * But a different problem surfaced. My interaction with remote shells is broken under both msys or cygwin bash on my end. Specifically, my tab key completes but doesn't show the completion, the emacs commands like absolute-backspace don't work, and control-p for previous command shows a triangle shape, though it will re-execute the last command. Wtf? :( * I can't just avoid the newline problem. It happens with every shell script, e.g., the "darcs" shell script wrapper I just installed. That's a show-stopper. What can I try next? * Used unix2dos to convert my .bashrc and .aliases back to dos style. Reinstalling cygwin with "binary" default text mode. (See [[this recommendation| http://cygwin.com/cygwin-ug-net/setup-net.html#id4725456]] in the setup directions.) Setting CYGWIN to tty (in hopes of fixing my remote server problem). See [[CYGWIN variable explanation| http://cygwin.com/cygwin-ug-net/using-cygwinenv.html]]. * Found [[bash announcements| http://www.cygwin.com/ml/cygwin-announce/2006-12/msg00026.html]] about the problem I'm having. Summary: (a) use dos2unix whenever needed, and (b) use the igncr shell option. Both worked for me, but I prefer the latter. 
I did it by setting SHELLOPTS in my environment variables setting WinXP's System Properties, rather than in .profile or .bashrc, so that emacs would get it also. * Chatted with Joseph. He uses [[securecrt| http://www.vandyke.com/products]] as his shell. I'll check it out, but first [[rxvt| http://www.cygwin.com/faq/faq_toc.html#TOC65]]. * rxvt: same problem. * securecrt: wow -- works great! Way better than what i'm used to. Nicely customizable. Emacs support works great. Nice coloring in the shell. * While darcs is working for me, I still have the "plink" warning message ("{{{plink: unknown option "-O"}}}"). Searched and found many reports. I got the latest [[daily darcs| http://glozer.net/darcs/daily/?C=M;O=D]] and set it up to be {{{c:/bin/darcsdir-cygwin/realdarcs.exe}}} (stashing the old one). No more warning message! === * I'm very pleased with the outcome of my system changes.++++ * Using PuTTY for my ssh, as recommended for darcs. * Using the latest darcs a realdarcs.exe. * Not using MinGW or MSYS. They're installed but not used. Might need for compiling ghc. * Using securecrt as my terminal program. Emacs & coloring. === * Haskell projects:++++ * Got my Cabal tweak working. Sent a note to the libraries list asking for feedback. Now my Makefile generates links to syntax-colored source code. * Next, change the choice of colors, at least for punctuation. Look into hscolour's CSS option. A drawback is that I'd have to have a copy of the css for each source directory. Stick with HTML. I made a copy of Malcolm's [[sample .hscolour file| http://www.cs.york.ac.uk/fp/darcs/hscolour/.hscolour]], edited the choices, and placed it in my home directory. I could instead have placed it in the project directories, but I'd rather not make multiple copies. 
I love his use of a Read/Show data type for the config.+++ {{{ ColourPrefs { keyword = [Bold,Foreground Blue] , keyglyph = [Foreground Red] , layout = [Foreground Red] , comment = [Foreground Green] , conid = [Normal] , varid = [Normal] , conop = [Bold,Foreground Red] , varop = [Foreground Red] , string = [Foreground Magenta] , char = [Foreground Magenta] , number = [Foreground Magenta] , selection = [Bold, Foreground Magenta] , variantselection = [Dim, Foreground Red, Underscore] } }}} === * Next? Links for a wiki page.++++ * I see the flags, {{{--comments-base=URL}}}, {{{--comments-module=URL}}} and {{{--comments-entity=URL}}} for haddock. For now, just use the first two. * Idea: rather than putting docs on darcs.haskell.org, put them on the Haskell wiki. That way, comment links could be rendered in such a way that they reveal whether or not there are comments. But, hmm. Then haddock would have to generate wiki markup. Interesting, since haddock has its own wiki-like markup. * I notice that the [[Gtk2Hs page on haskell.org|http://haskell.org/gtk2hs]] is a WordPress blog. Good idea and very pretty. * Where to put a wiki page for my projects/packages? Look at http://haskell.org/haskellwiki/Special:Categories. * Sent a note to haskell libraries list about comments & wiki pages. === * Realized that entity source pointers should be to the %{FILE} (where the code really is), not %{MODULE} (current exporting module). But with preprocessing, the temporary pre-processed file gets used instead of the original. Probably an oversight. I sent a query. * Next, continue on my TV explanation. Do I really want to be putting it in the haddock doc. Maybe better in the project wiki page. On the other hand, the haddock doc gives me links from example references to the entity docs, which is pretty sweet. * Progress on my TV library commentary. Next: more of the algebra (but may be better in the other modules), the general story with arrows, //composition// and {{{DeepArrow}}}. 
=== * No idea how [[this page| http://www.oncotton.co.uk/peter/index/A4PAPERCUT_000.htm]] came up on my browser, but I love it. Here's [[the entry page| http://www.oncotton.co.uk/peter]]. * Haskell projects++++ * Got a reply from Duncan, saying that %{FILE} ought to track the Haskell line pragma in the source and hence be the original source. I experimented some and found that the {{{-optP-P}}} flag leads to these pragmas being lost. Sent a reply. At Duncan's suggestion, I commented out the "-optP-P" (in Cabal's {{{Distribution/PreProcess.hs}}}), and now everything works great. * Oops -- noticed a parsing problem.++++ {{{ iPrim :: () ~> a -> Input (~>) a }}} === This declaration doesn't parse unless I parenthesize {{{() ~> a}}}. Compare the [[GHC grammar| http://darcs.haskell.org/ghc-6.6/ghc/compiler/parser/Parser.y.pp]] with my tweaked Haddock grammar. The two grammars don't correspond very closely, and I'll be glad when there's just one. I moved the infix production from doctype to tydoc, which makes sense to me. I wonder why the ghc grammar works. * Also found an unparsing bug. Type operators weren't always getting parenthesized when used non-infix. I moved the parens from ppHsAType to ppHsName. * Changed my use of {{{arr}}} to {{{~>}}} in my code. * Worked on getting external library references to point to locations on the web, rather than to my local copy. Ran into a block with Haddock's apparent bias toward --use-package over --read-interface. I can get Cabal to insert the latter, but I don't know how to get it to remove the former. * Idea: [[wiki haddock]]. Sent to haskell libraries discussion list. === * Cygwin stuff++++ * Darcs fails.++++ {{{ bash-3.2$ darcs pull
libcurl: couldn't connect to host))
}}}
===  My hunch: my new bash {{{igncr}}} setting.  Try turning off this setting.  After also doing a dos2unix over the darcs script, my "darcs pull" worked fine.
*  So, I removed my SHELLOPTS setting and did {{{dos2unix .bashrc .profile .aliases}}}.  I'll do more dos2unix as needed.
===
*  Wrote a [[note about TiddlyWiki and Haskell]] and thoughts on [[using TiddlyWiki to view programs]].
*  More TV writing.
*  I want a TV composition example, to show the power of separable interfaces & values.  Simple idea: take apart a string into words ({{{words}}}), reverse each word ({{{reverse}}}), and put the result back together ({{{unwords}}}).
*  Added Emacs bindings {{{\C-'}}} and {{{\C-@}}} to surround words (expressions) with tick marks or at signs, for Haddock documentation.
===
* Men's group ++++
*  Got missing email info.
*  Sent note about kick-off meeting.
*  Set up [[TimeToMeet|timetomeet.info]] meeting and sent.  Heard back from Mitch and Roger.
===
* TiddlyWiki ++++
*  Tweaked twee.el to sometimes expand-abbrev.
*  Idea: [[tiddlers in your favorite editor]].  Sent to the TiddlyWiki and twee-code google groups.  Heard back from Bob M.
===
* TV ++++
*  I'd like to extended the {{{DefaultIn}}} and {{{DefaultOut}}} type classes to handle lists in a general way, but still handle String (list of Char) in the most familiar way.  The {{{Read}}} and {{{Show}}} classes have a special trick.  Can I adapt it?  Made a first attempt. +++
{{{
class DefaultIn a where
defaultIn     :: CInput a
defaultInList :: CInput [a]

instance DefaultIn Bool where
defaultIn     = boolIn False

instance DefaultIn a => DefaultIn [a] where
defaultIn     = defaultInList
defaultInList = error "defaultInList: not yet defined for [a]"

instance (DefaultIn a, DefaultIn b) => DefaultIn (a,b) where
defaultIn     = IPair defaultIn defaultIn
defaultInList = error "defaultInList: not yet defined for (a,b)"
}}}
=== What to do for lists and pairs?  Similar situation for {{{DefaultOut}}}, which must also address //functions//.  Whatever I do must apply to all {{{CommonInsOuts}}} arrows, in particular, {{{UI}}} and {{{KIO}}}.  I can imagine reasonable solutions, but I'm not clear on what general policy to impose.
*  Postpone this design question.  Copy my attempt to a new {{{DefaultsList}}} module, and revert to the old {{{Defaults}}} for now.  Keep {{{DefaultsList}}} in darcs and in {{{TV.cabal}}}, so it will get compiled and even have Haddock declaration, but don't export it.  Leave comments to encourage others to think about the problem.
*  Cleaned up my TV Makefile some.  I learned about {{{mkdir -p}}} (make intervening directories if necessary) from dons on #haskell and so removed the one ugly hack I had in my Makefile.
===
*  Wrote some [[tips on using hscolour]].  I want it to be a blog article.  I can copy & paste the rendered version of this tiddler into my [[Blogger blog| http://conal-elliott.blogspot.com]], but I lose CSS and fancy TiddlyWiki features like +++^[nested sliders.]like this one===  Instead, I want my blog to be a subset of this journal, say, determined by a special tag.
*  Idea: [[haddock for library illustration]].
===
* [[Emergence| http://emergence.awakeningcompassion.com]] ++++
*  I want to guide those leaving feedback about somebody, so that everyone knows what actual experience the reviewer has.
===
* Misc ++++
*  Asked for help on #emacs ++++>
does anyone know a tight way to compute a string of n spaces in elisp?  extra credit for generality (e.g, n repetitions of a given character).
=== Learned about {{{make-string}}}, which is perfect: {{{(make-string n ? )}}}.
===
*  What to do next ++++
*   [[TV]]: Add a bit about ideas for other arrow, like {{{Kleisli HAppS}}}.  Change the module structure.  Decide location for {{{Cofunctor}}}.  Look at to-do list.
*   [[Phooey]]: Change location of library docs on server.  Add source & comment links.
*   [[DeepArrow]]: Docs, examples.
===
*  Chatted with Sean Seefried (saved in Gmail and in my Chats folder). ++++
*   He helped me get my public key working with darcs.haskell.org again.
*   We discussed how to describe TV and where to put it in the module hierarchy.  The conversation reminded me that the whole //hierarchy// paradigm doesn't work for me.
*   This slogan popped into my mind: "Ready to use & ready to reuse"?  We both liked it.  A variation: "Usability meets reusability".  Or "User-friendly meets reuse-friendly", or even just "Use and Reuse".
===
*  On-going conversation about Cabal's use of a textual package description vs a Haskell DSEL.
===
* Misc ++++
*  Idea: [[quotation TiddlyWiki]]
*  [[note to Sandy about server software and the power of computers]]
===
*  Some conversations on cabal-dev and #haskell about Cabal and its use of a textual specification instead of a DSEL.
*  Decided to keep the package name "phooey" in lower case.  I've gone back & forth on this one.  People seem to be using all lower case if a single word and mix in upper case for acronyms and multi-words.  Okay by me.
*  I wanted some more order, so I moved my current Haskell projects into ~/Haskell.  I'll move others (Eros, Pajama) when I revisit them.
*  There will be a lot of commonality among make files, so I created [[cabal-make]]{{{.inc}}}, which is a shared include file in {{{~/Haskell}}} and worked to make it customizable.  Include it from my projects.
*  Recompiled Iavor's monadLib package using my {{{cabal-make.inc}}}, which helped me get bugs out.  I copied the resulting docs to the server and sent Iavor a note asking him to look and to copy them to his repo, or preferably, to try generating them himself, and to give me some feedback.  I also sent a patch with some small code tweaks that came up.  Oh, oops.  I just remembered that I have some personal patches to haddock needed to build these docs.  Iavor won't be able to do it himself until I get the patches into Haddock.  Sent another note.
*  Next I'd better get my {{{--no-use-packages}}} Cabal flag in place.
*  The monad library I was using (Iavor's {{{monadLib-2.0.1}}}) is now obsolete, so I'm trying the new version.  Start with [[DeepArrow]].  First, I did {{{darcs record --all}}} (my first in the project), so I can back out.
| //Old// | //New// |
|Id a //application//|return a|
|Id a //pattern//|a = runId ida|
|updateR|new def in Phooey.TagT|
=== These changes sufficed for Phooey.  Hurray!
*  Now I can test {{{--no-use-packages}}}, and I see that I broke {{{--haddock-args}}}.  I lose all of my args.  Simple test: {{{./setup haddock --haddock-arg=--bloonk}}}.  Found it.  Missing capitalization of a constructor in a match clause.  Was never falling through to the {{{HaddockOther}}} case.
*  [[DeepArrow]], [[Phooey]], and [[TV]] are all building fine.
===
* Men's group ++++
*  Mathias is available any evening.  Prefers not on Friday or Saturday.  Prefer not to do it the same week as the NVC group.
===
*  I'm ready to decide on the modules for TV.  Use {{{Graphics.UI}}} only for the small {{{UI}}} module.  Otherwise, take a new top-level piece of the name space {{{Interface}}}.
*  That one small module is the only reason TV depends on Phooey.  Suppose I moved the UI piece out of TV (though it was the sole original motivation for TV).  Where would I put that piece?  A whole new package?  If so, what to call it?
*  Maybe better to release as one package (dependent on Phooey) and ask for feedback.
*  I better get more IO examples going.
*  Cofunctor location?  Other Cofunctor functions?   For now, use {{{Data.Cofunctor}}} as a placeholder and leave a comment inviting others to join in and make another place for it.  Then use that place in a future release.
*  Renamed {{{lib-make.inc}}} to [[cabal-make]]{{{.inc}}}.  More descriptive and inviting for other Haskellers.  Give it a Haskell wiki page.
*  Considering the name //functoid// for the generalization of function used with deep arrows.  That word already has some uses. +++
*   [[ecommerce context string mappings| http://en.wikipedia.org/wiki/Functoid]] (also called "maporator"!)
*   [[FC++| http://www-static.cc.gatech.edu/~yannis/fc++/boostpaper/fcpp.sectfull.html]]
*   Informally [[for xml schema mapping| http://www.urbandictionary.com/define.php?term=functoid]].
=== There may be more.  Perhaps I better look for another word.  How about "functoon"?  Seems to mean "function" in some language, but I don't know which.
*  Decided to put package pages at the top level of the Haskell wiki, rather than under "Packages".  Use tags (several per package), not categories!
*  Idea: Give my projects a sort of "inverse FAQ", in which I ask questions of my users.
*  Correspondence with Iavor about {{{monadLib}}}, including my choice of {{{monadLib}}} vs {{{mtl}}}.  He mentioned that {{{mtl}}} is currently distributed with Hugs & GHC (though maybe not for long), so it's more convenient for users.  I didn't realize that.
*  Discovered that hscolour didn't make anchors for {{{newtype}}}.  Fixed and darcs-sent Malcolm the patch.
*  Mediawikis have "talk" pages that correspond to each content page.  Is that where I want comments to go, rather than a "Comments" page?  What would be the corresponding content page?  I don't know.
*  Wrote up a [[DeepArrow wiki page| http://haskell.org/haskellwiki/DeepArrow]].  Here's a bit saved for the TV page. +++
While thinking about the code for rendering these interfaces into GUIs, I noticed that it contains almost nothing but generic arrow operations.  (//Go to talk about generalized interfaces//.)===
===
* Misc ++++
*  Gedanken experiment: Imagine using my personal storage and all content on the web entirely without names (addresses) and classification, using (multiple) tagging and searching instead.  There could still be a sort of "name" as a unique identifier, but it would have no semantic content.
*  [[Pandion|http://www.pandion.be]] is an open-protocol, encrypting IM client that is extensible via HTML and Javascript.  Imagine cool stuff I could do with Javascript, especially automatically generated from a DSEL and/or Yhc.
===
*  Switched from monadLib to mtl. +++
*   DeepArrow: easy (Id -> Identity)
*   Phooey: not so easy.  See replacements below.
*   TV: Trivial (Id -> Identity)
=== I had to make several replacements. +++
|updateR (mine)|local|
|BaseM|???|
|inBase|lift^n|
===
*  Urk. Hackage wants a single "category" for my libraries.  What category to give DeepArrow?  I made up "Composition".
*  Gave Cabal another Haddock flag {{{--no-hide-other-modules}}}.
===
* Misc ++++
*  Trying out [[mozex| http://mozex.mozdev.org/development.html]] for Emacs via FireFox.  It's more customizable, and it lets me set up a keyboard shortcut (control-space).
===
* Misc ++++
*  Consider using longlines for my README & CHANGES files.  Will they then wrap when viewed in a browser?  No.  At least not under FireFox.  Oh well.
===
*  Lots of great progress.
*  Phooey 0.1 is ready to ship.
*  Idea: instead of defining both curried and uncurried definitions, use {{{uncurryA}}}.  Catch: loses the overall title.  Fix it, by having {{{curryA}}} and ilk preserve & transform outer titles.  Did it.
*  I got all three projects released, announced, and added to Hackage.  Quite a project.
*  Integrated my Cabal tweaks (new Haddock flags) with other patches.  Made a patch for all but the -optP-P issue, and sent to Isaac J.  Query about approach to -optP-P.  When that's all straightened out, I'll release [[cabal-make]].
===
* Misc ++++
*  What do I want to focus on next?  Blogging!
*  Started list of [[blogging topics]]
I'd like to turn source code (mine and others') into a fully hyperlinked form, in which every name reference links to the name's definition.  Syntax-coloring would be great also.  I see Programmatica.  Is it in use, supported, and reasonably easy to install and use?  Are there other options?
*  I want to rearrange my tiddler view so that tags & tagging are out of the way.  I like the [[MPTW| http://mptw.tiddlyspot.com]] styles, but I don't think I can use them until ZiddlyWiki is caught up to TW 2.1.
===
===
* Blogging ++++
*  [[interfaces -- combined and separable]]
*  Clarifying a research question for TV ++++
*   TV+Phooey can be thought of as a functional approach to GUIs.  Or maybe the interface part, of type {{{Output UI a}}}, is the type of GUIs for values of type {{{a}}}.  So far TV and Phooey GUIs are very simple.  Does the TV approach scale up to arbitrarily rich GUIs?  To find out, take some rich examples of GUI applications and tease them apart into two parts: the pure value and the GUI.  Then further decompose into simpler TVs.
===
===
* TiddlyWiki: ++++
*  Got tags & tagging out of the way.   Were right-floating.  Now top & bottom.  Lots of CSS tweaking.
*  Tweaked AutoTaggerPlugin so it won't self-tag journal entries.  I'd turned off auto-date-tagging a while back, because of self-tagging.  Now back on.  Emailed my tweak to Eric Shulman (who wrote the plugin).
*  Fixed the problems preventing my TiddlyWiki from working in Internet Explorer.  The problems both came from defining a JS variable in one tiddler that is used in another.  All is fine when I instead extend the core variable {{{config}}}.  I changed "{{{syntaxify}}}" to "{{{config.macros.syntaxify}}}" in SyntaxifyPlugin, and "{{{declareRewrites}}}" to "{{{config.macros.rewrite.declare}}}" in RewritePlugin.  I don't know why dependent plugins can successfully access {{{config}}}, but not {{{syntaxify}}}.
===
* Misc ++++
*  Holly & I visited Audrey (approaching 14 months old) and went for a walk in the park.  Then lunch with Pat.
*  The PSNCC mailing for [[trainings| http://psncc.org/trainings]] came out this morning, with five of our [[Awakening Compassion| http://awakeningcompassion.com]] classes (four free intros and two five-week series).
===
*  Tweaked Cabal so that it checks the Haddock version before choosing
*  Adapted [[software releases 2007-01]] as a [[blog entry| http://conal-elliott.blogspot.com/2007/01/software-releases.html]].  Eventually I want my TW to be my blog, say with a special tag that identifies tiddlers as blog entries.
===
* Misc ++++
*  Got myself a new monitor: 24-inch Gateway FPD2458W.  Huge improvement in space.  Color looks great.  Tiltable (with auto-tilting desktop.)  But there's significant ghosting of black text on white background.  The text isn't as sharp as I'd like, but maybe that's the same issue.  Tried Holly's computer.  Looks great, so the problem is with my computer rather than with the monitor.
*  Looking for a replacement computer: one that has an nVidia card (for stereo head-mount display) and dvi (for high quality signal into the new monitor).  I'd like something fairly light for travel.  Recent graphics processor, for future GPU work (Vertigo re-do).
===
*  There's now a [[wxHaskell wiki page| http://www.haskell.org/haskellwiki/WxHaskell]].  I added pointers to Phooey and TV and found a tip on layout: "Got layout troubles? Packing things inside yet another panel seems to help sometimes".
*  Yep, using that tip, I finally fixed my horizontal layout problem.  Now all widgets stretch properly.  This fix will improve TV as well for pair inputs and outputs.  Pushed the Phooey patch to the server.
===
* [[Computer shopping| computer shopping 2007-01]].  Called Fry's.  Their PC notebooks don't have DVI.  Only one nVidia notebook: 17\" Toshiba Qosmio.  One left, $2700, core 2 duo, 2.0 ghz, 240 GB HD (2x120), nVidia 7600. * More [[computer shopping 2007-01]] * Joseph pointed me to a neat demo: http://labs.live.com/photosynth. Didn't work for me, but try again later. * Haskell projects ++++ * What's my direction from here? ++++ * Blog about the grading example. * Rebuild Eros on my recent pieces. Figure out how to do gestural composition without code redundancy. * Abstract out wxHaskell specifics from Phooey & TV. Add a gtk2hs implementation. Use type classes. Split Phooey into PhooeyCore, PhooeyWx, and PhooeyGtk. * Write up Phooey. Target ICFP? === * [[installing Gtk2Hs]] === * Misc ++++ * To do: generalize [[smart quotes]] plugin and use it for "{{{@ ... @}}}" in Haskell code. Did it: [[rewrite delimiters]] and [[rewrite delimiters -- personal]]. But, oops, my approach to Haskell delimiters is bogus. the plugin re-wikifies the replacement delimiters separately, not in context. So I'm deleting these new plugins. (They'll hang around in the Zope store.) * Called Portable One to [[ask about Asus A8Js]]. === * TiddlyWiki ++++ * To support literate Haskell as TW source, I fixed up RewritePlugin to handle LHS and RHS substitution patterns. Now the at sign syntax is super-easy to specify. See [[RewritePlugin: Haskell]]. * Changed SyntaxifyPlugin to syntaxify in-line style code. In the generated shadow StyleSheetSyntaxify, I factored the font setting out of {{{.viewer div.syntaxify}}}. * Added a configuration option to SyntaxifyPlugin to determine whether to show lines in alternating colors (which I don't like). * Send the SyntaxifyPlugin changes to Bob M. * Now I think I'm ready to go back to my blog post about TV. Urgh. I rediscovered the unfriendly interaction between mmm and longlines modes in Emacs. 
When a mmm buffer opens up in longlines mode, the long lines actually get broken. Rats. What the heck do I do about that? === * TiddlyWiki ++++ * Correspondence with Bob M about my tweaks to his SyntaxifyPlugin. * My single-identifier haskell expressions are all getting interpreted as definition lines, due to the simple regexp I used. Fixed! Javascript regexps are powerful! I love the lookahead feature. Too bad there's not a "lookbehind". === * Haskell projects ++++ * Finished a first draft of [[separating IO from logic -- example]], in the style I'd like to use. It's a literate Haskell program that also works as TiddlyWiki markup. Ran into some issues. ++++ * When I use longlines mode, Emacs and GHC have different ideas about line numbers, so my error messages are off. * Line spacing rules in TW are such that I have to remove some of my empty lines to eliminate unwanted double empty lines. Come to think of it, that happens a lot, following bulleted lists and code blocks. One place this difference bites me is pasting into GMail. Fix the problem in TiddlyWiki and update my tiddlers accordingly. (Tedious, but better sooner than later.) === === * Corresponded with Bob McElrath about SyntaxifyPlugin and related issues. * Experimented with copying rendered tiddlers to my Blogger blog. Requires copying some of my TW CSS into the blog template. Then I realized that RSS feeds & aggregators won't get the CSS, so my posts will look crummy. Instead, I'm going to use my blog for teasers that point to tiddlers in my journal. This way I can also use fancy stuff like nested sliders, Pajama-generated Java applets, etc. And it may draw Haskell folks into TW. * Progress on [[separating IO from logic -- example]]. I'm increasingly doubtful about my @Cofunctor@ instance for @Output@ in TV, because I don't know how to combine with the DeepArrow algebra. Type the text for '2007-02' * On Friday & Saturday, Jake & I attended [[RecentChangesCamp|2007.RecentChangesCamp.org]]. * Yoips. 
The eMagin 3D visor just jumped from $549 to $1500. See [[this forum discussion|http://www.3dvisor.com/forum/viewtopic.php?t=894]]. I already bought mine, but the price jump means fewer people would be able to use any development work I were to do for it. * Idea: make a TW formatter that auto-links to Haddock definitions. For similarity to Haddock, use the notation 'foo'. Make specially tagged "export tiddlers" containing a module URL and list of names. At TW initialization, these modules are parsed to make a table that maps names to URLs. Alternatively, do some ajax magic to make the documentation appear as an overlay. Maybe hook up with Hoogle. Or use the {{{.haddock}}} files. Use the single quotes in regular text (as in Haddock documentation), but drop them in code. * I tried to factor out file reading in [[separating IO from logic -- example]], but it doesn't type-check. +++ \begin{code} coalesceT_2 = tv (oLambda (fmap parseTasks contentsIn) (cofmap show defaultOut)) coalesce gradingT_3 = fromFile "tasks" ->| coalesceT_2 ->| summarizeT ->| toFile "summaries" grades_6 = runTV gradingT_3 >> runTV (fromFile "summaries") \end{code} === The problem is that @coalesceT_2@ has a @Task@ input, not a @String@ input. * Another bit excised, since the mentioned functions are not released yet. +++ This pattern of using an identity function and a file is generally useful, so it has a name, "[[fromFile| http://darcs.haskell.org/packages/TV/doc/html/Interface-TV-IO.html#fromFile]]". The command above is equivalent to "@runTV (fromFile "tasks")@". (There is also "[[toFile| http://darcs.haskell.org/packages/TV/doc/html/Interface-TV-IO.html#toFile]]".) === * Getting my new computer set up * I'm having eye strain. Is it the new computer, or (giant) monitor, or something else? I've turned the contrast & brightness way down on the new monitor. * Emailed in a scan of my computer receipt for the free upgrade from XP Pro to Vista Business. * Some TW correspondence about inheritance & TWs. 
* Still mulling over [[composition and interfaces]]. I want to resolve how the DeepArrow composition combinators apply when the shapes aren't as expected. It may be possible to decompose even primitive pair consumers into pairs of primitive consumers, and then compose in the arrow being @first@- or @second@-lifted. Even for a primitive function output, in some sense it must have a producer of arguments and a consumer of results. For now, however, I think I'll make the composition functions partial. I can then eliminate the empty and composition constructors for @Input@ and @Output@ and yield dynamic errors. Give it a try. * The new computer runs [[Pan| http://conal.net/Pan]] toys about 5x as fast as my old computer. Wow! * Emacs ++++ * Worked on the problematic interaction between longlines and mmm modes. ++++ * If I turn off longlines before turning on mmm, mmm still creates line breaks in the long lines. * On the other hand, if longlines was never on to begin with, mmm does not create line breaks. * Set some pause points (y-or-n-p) in the mmm-mode code. The breakage happens during {{{(funcall mode)}}} in {{{mmm-update-mode-info}}} in {{{mmm-region.el}}}. I guess what happens is that the line breaks ought to be removed but they're not. * Finally fixed this long-standing problem with a tweak to mmm-mode-on. * Emailed my fix to the person maintaining mmm-mode, and asked him to integrate the fix. * Learned a useful trick. To track line numbers while compiling, simply turn off longlines mode. === * Learned about emacs refill-mode. Great for typing auto-filled contexts. As I enter text, the paragraph refills itself. * Tweaked my Emacs default frame parameters to better suit my new machine. More elegant and robust than my previous setup. * Use mmm in TWs for submode content other than Haskell code: html, css, and javascript. Found & installed css-mode and javascript. === * TiddlyWiki ++++ * Wondering how to free up space in my TW now consumed by the SideBar. 
Studied Eric S's [[TiddlyTools| http://www.tiddlytools.com]]. He uses NestedSlidersPlugin and MoveablePanelPlugin. Powerful & simple. I grabbed a bunch of related plugins from TiddlyTools. I don't yet have the MoveablePanelPlugin stuff working for me. * Discovered that SyntaxifyPlugin clobbers the original customClasses formatter. Fixed it. Sent note to Bob M about the change. Looked at the [[trac setup| http://trac.tiddlywiki.org/tiddlywiki/browser/Trunk/contributors/BobMcElrath/plugins/syntaxify.js]]. I want to learn to work with trac and svn. === * Misc ++++ * Trying out Second Life, to see how my new computer does. * Idea: implement twee and untwee by means of [[bi-directional arrow combinators| http://www.cs.ru.nl/A.vanWeelden/bi-arrows]]. Hmm... do bi-arrows fit DeepArrow? * There's a [[joystick emulator for the z800| http://raeldor.blogspot.com/2006/09/z800-joystick-emulator-released.html]]. === * Thinking and correspondence with Chris Klimas about twee/untwee. * I want to get [[separating IO from logic -- example]] out for people to read. Updated TV/CHANGES. This example does not depend on Phooey or wxHaskell at all, and wxHaskell is difficult to install. It might be a good time to split TV into a core TV (with KIO) and a TV-phooey. * TV-0.1 release +++ * Removed iEmpty & oEmpty and corresponding constructors. Generate dynamic errors earlier rather than later. * Removed ICompose & OCompose constructors. Redefined iCompose and oCompose (and consequently fmap on inputs and cofmap on outputs) to work only on (possibly titled) primitives. I don't know how to implement the DeepArrow algebra with OCompose. * Added wrapO and wrapAO, sort of analogous (or dual) to wrapF. I'm not sure the name is a great choice. I've also considered "unwrapO". * Added readShow, defined via wrapO. * Redefined interactRSOut via readShow and renamed "interactLineRS". Added "interactRS". * Added fromFile & toFile TV functions. 
* Added fileOut * Moved Grading example from src/Examples.hs to src/Grading.hs * Added "short lines" example to src/Examples.hs. * Added type alias RunTV for defining types of runTV specializations, such as runUI and runIO. === * TV-0.1.1 release: ++++ * Changed all files to *nix-style line endings. * Restored source pointers in docs. Had gotten lost. === * Big improvements in my build & test process. Much more automated now. ++++ * Extract package name and version from .cabal file * Targets: {{{darcs-repo}}}, {{{darcs-tag}}}, {{{darcs-dist}}}, {{{test-get-build}}}. === Simplified [[project release check-list]]. * Pushed changes, made dist, updated hackage for DeepArrow-0.0.1, phooey-0.2.1, TV-0.1.1. * [[How and Why AJAX, Not Java, Became the Favored Technology for RIAs|http://ajaxworldmagazine.com/read/333329.htm]] (rich internet applications). See in particular the discussion of Flash+ActionScript as an alternative to Java. May be a good target for some of my work. * Separating out TV & GuiTV. * Released TV 0.2 and GuiTV 0.2. Mainly, the releases are about separating out GUI functionality so that core TV is easier to install. * Made [[teaser blog post|http://conal-elliott.blogspot.com/2007/02/separating-io-from-logic-example.html]] for [[separating IO from logic -- example]]. * Ran into strange problem with a class instance getting lost when re-exporting a module. Sent note to haskell list. * Submitted a [[bug report| http://hackage.haskell.org/trac/ghc/ticket/1145]] for the import problem mentioned yesterday. * I heard back that my TW fails under Opera (which I'd forgotten). Found & fixed the problem. Opera didn't like the regexp {{{{code} }}}used in [[RewritePlugin: Haskell]]. I added back-slashes before the braces, and now FF, IE, and Opera are all happy. Hooray! 
* Some choices for next focus ++++ * [[cabal-make]] ++++ * Set up a repo on darcs.haskell.org * Improve documentation: file comments, wiki, blog post === * [[TV]] ++++ * Come up with a simple [[HAppS| http://happs.org]] application and make it with TV. See the [[tutorial| http://www.haskell.org/haskellwiki/HAppS_tutorial]]. === * [[Eros]] ++++ * Continue the rebuilding of Eros on Phooey & TV. === * [[Phooey]] ++++ * Return to the Phooey paper. Explain how Phooey works. Inversion of control. === === * Thoughts on the [[Phooey]] paper ++++ * Mix some top-down & bottom-up in the presentation. Construct the UI arrow on top of the UI monad before constructing the UI monad. * Put it together first without the layout monad and then define and mix it in with a simple tweak. * Now the development is pretty simple ++++ * Define UI on top of UIM via the AmA arrow-and-monad transformer * Implement UIM on top of CB (the "callback" monad transformer) * Toss in simple frame creation (runWx). * Add layout, via TagT and LayoutT and a small tweak to UIM. === === * [[cabal-make]] ++++ * Correspondence with Eric Yow about my broken wxHaskell doc links. He thinks haddock changed its convention (dot vs dash for module name separators). I'm puzzled. I whipped up a shell script to make symlinks, at Eric's suggestion. * Switched to CSS, on ndm's suggestion. The cabal-make settings determine the hscolour.css file. I have a cross-project copy in {{{~/Haskell}}}. * Added a substitution for the haddock "FILE" variable (like the "MODULE" variable), so that I can put the hscolour'd source files all into one directory, which enables sharing of a common hscolour.css. Sent patch to Simon Marlow. * [[hscolour| http://www.cs.york.ac.uk/fp/darcs/hscolour]] flag changes. === * Getting [[lhs2TeX| http://www.iai.uni-bonn.de/~loeh/lhs2tex/]] working, with help from Andres. Works now. 
* Morning readings ++++ * [[OnceAndOnlyOnce| http://c2.com/ppr/wiki/WikiPagesAboutRefactoring/OnceAndOnlyOnce.html]] * [[MysticalProgramming| http://c2.com/cgi/wiki?MysticalProgramming]] * [[CodeSmell| http://c2.com/ppr/wiki/WikiPagesAboutRefactoring/CodeSmell.html]] * [[DrivingMetaphor| http://c2.com/cgi/wiki?DrivingMetaphor]] (and contained links): could apply to the game of [[Making Life Wonderful]]. * [[DependencyInjection| http://c2.com/cgi/wiki?DependencyInjection]] * [[A Lisp dream| http://xkcd.com/comics/lisp.jpg]] === * Made a [[user page| http://haskell.org/haskellwiki/User:Conal]] for myself on the Haskell wiki. * [[cabal-make]] ++++ * Explored alternatives for placing hscolour'd source code. Gave up. Added a comment to {{{cabal-make.inc}}}. +++> Note: (a) no project-level source link (--source-base), and (b) hscolour'd source code is generated at "dist/src.Foo.Bar.hs.html". I prefer (a) project-level source link and (b) "dist/src/Foo.Bar.hs.html", but I don't know how to get haddock to keep the first slash and replace the others. I took out source-base rather than link to a directory that mixes hscolour'd code and haddock docs. Better solution: add a hscolour flag for the hscolour.css file location, and put the sources back into a tree with links to that file. I don't know how to construct the necessary relative references, which would be something like "../../hscolour.css". Another idea: add per-source-directory symlinks to the shared CSS. Simple but less browser-cache-friendly. I don't know how to make this idea work either, since I don't know how to make real symlinks in my cygwin environment. === * Eliminated the requirement that the {{{Exposed-Modules:}}} Cabal spec line could not contain the first module. Made the regexp more flexible and added a {{{sed}}}. === * Correspondence with Wolfgang Jeltsch. He spotted a problem in my @AmA@ formulation in [[Phooey]]. 
I doubt there's a solution at the current level of generality, i.e., with an arbitrary monad for //m//. Also, my definition of @Source = IO@ fails to capture the read-only property of sources. Look for another formulation, and then revisit @AmA@. * First reading of [[Applicative programming with effects| http://www.soi.city.ac.uk/~ross/papers/Applicative.pdf]]. Swapped out @Monad@ for @Applicative@ in Phooey. Renamed AmA to AAA. * Thinking, talking with Holly, and correspondence about some NWCompass issues, including inclusion/exclusion and "trust". * Created a blog "[[Evolving NVC| http://awakeningcompassion.com/evolving]]". Wrote a note called "[[Trust that| http://awakeningcompassion.com/evolving/?p=3]]" and another called "[[Promises, predictions, and wishes| http://awakeningcompassion.com/evolving/?p=4]]". * Contemplating applicative functors & Phooey. * NVC ++++ * I'm gathering the understanding that the implementation of sociocracy got seriously off track at the Feb 3/4 retreat and since then. I've been talking with various folks, piecing together and mulling over what's going on currently in the group's efforts to implement sociocracy. * Blogging ++++ * [[Trouble at the beginning| http://awakeningcompassion.com/evolve/?p=7]] * [[Some comments on the word "trainer"| http://awakeningcompassion.com/evolve/?p=8]] === === * Haskell projects ++++ * Thinking about applicative functors for data-driven computation/evaluation. (Paper title: "Functional interfaces for data-driven evaluation".) Seems a great fit. Idea: instead of treating inputs & outputs differently, have outputs be IO-valued data-driven computations. === * Got some feedback from the sociocray yahoo group. Wrote another blog post: [[Clear aims| http://awakeningcompassion.com/evolve/?p=9]]. * Progress on applicative functors for UIs. So far it's a great fit. Pretty similar in implementation to the monadic style, but allows me to hide the value sources. 
I think it tracks dependencies much better than the monad (and better than the arrow layered on the monad). In "//liftAm f a1 ... am//", there's a dependency of the whole on each of the //ai//, but no dependencies among the //ai//. * NVC ++++ * Last night's class in "[[A Journey Through the Heart of Nonviolent Communication| http://awakeningcompassion.com/seattleunitywinter07.htm]]" was fantastic! We're delighted with our new approach to teaching NVC Consciousness & skills (not OFNR!). Our class members (we chose to be 15 of them) are loving it also. Sandy came last night and raved at length & depth afterward. * On Monday, we gave an NVC intro (same approach) to the [[Washington Health Foundation| http://www.whf.org]]. All of us present ate it up! * In-depth exchanges with Doug, Barb, Liv, and Sandy regarding our sociocracy, mission, and inclusion. * Some sociocracy correspondence in response to an internal dissonance between my understandings of the intentions and the language. I'll blog about my insights soon on [[NVC Evolves| http://awakeningcompassion.com/evolve]]. * Long response to Sharon V who moderates the online sociocracy discussion group. She did not approve a note of mine describing my intention/language dissonance. * I'm spending a big fraction of my time in these NVC & sociocracy issues (and not as much on my ICFP papers). I'm fairly unattached to persuading anyone or getting my preferred policies, especially since I'm moving back to CA soon. Having my [[NVC Evolves| http://awakeningcompassion.com/evolve]] blog, however, gives me a place to publish the insights I'm getting. Some others on the planet can then discover these insights and join in community with me. That's enough for me. * I love this quote from Krishnamurti, describing his secret: "I don't mind what happens." === * Haskell projects ++++ * The applicative functor interface is working out very nicely for data-driven computation and for GUIs. 
Simpler and more efficient (tighter change propagation). * Next, what about TV? I've formulated it in terms of arrows. How about applicative functors? * There's a wiki page on [[Haskell-related blog articles| http://haskell.org/haskellwiki/Blog_articles]]. I added my [[separating IO from logic -- example]]. === Type the text for 'New Tiddler' * Updated the what's-new list on my [[home page| http://conal.net]]. I really want to take a whole new approach to my home page and web site. Something that shows the overall structure and invites the reader into exploring sections. Would I want a TiddlyWiki? * In using applicative functors for Phooey & TV, address some questions. ++++ How to model outputs (value consumers)? Currently I use @a ~> ()@. If I have an applicative functor F that represents sources of values, I can map inputs to F. What about outputs? Consider an output (type-specialized): \begin{code} stringOut :: Output UI String \end{code} I might map @stringOut@ to \begin{code} stringDisplay :: UI (String -> IO ()) \end{code} Alternatively, to \begin{code} (stringDisplay <*>) :: UI String -> UI (IO ()) \end{code} I'd probably want to use @fromBottomL@ for @presentLambda@, to get the output below the input. How to deduce the choice of @IO@ here or eliminate the need for it? Idea: compose the applicative UI functor with a Kleisli arrow to make a new arrow: \begin{code} type UIA = UI :.:: KIO \end{code} where @:.::@ composes unary and binary type constructors. ++++ \begin{code} newtype (f :.:: (~>)) a b = T_TT { runT_TT :: f (a ~> b) } instance (Applicative f, Arrow (~>)) => Arrow (f :.:: (~>)) where pure = T_TT . pure . A.pure T_TT f >>> T_TT g = T_TT (liftA2 (>>>) f g) first (T_TT f) = T_TT (liftA first f) \end{code} === Now I can just change Phooey to use the applicative functor interface, and leave TV to work with arrows. On the other hand, I don't make much use of arrows in TV, mostly @a ~> ()@ and @() ~> b@. 
Maybe there's a simple & general way to have the output type determined. Maybe an associated type. === * Haskell projects ++++ * I'm pretty happy with my small, simple @DataDriven@ library and its use in @ApplicativeUI@. Next, rebuild @MonadUI@, using @DataDriven@. * Redid @MonadUI@. ++++ Wow. Now @ApplicativeUI@ will fit right on top of it. In the new @MonadUI@, \begin{code} type UI = ReaderT Win (LayoutT IO) type Source = DataDriven IO type Upd = Updater IO -- IO () \end{code} Widgets have types like \begin{code} stringDisplay :: Source String -> UI (Source Upd) textEntry :: UI (Source String) \end{code} I haven't yet figured out how to handle things like sliders with dynamic bounds, i.e., widgets that have both an input and an output aspect. === === * Haskell projects ++++ * More recent progress reworking [[Phooey]] on top of applicative functors. It's all getting very simple. * One key simplification was to change the monadic version so that "sources" are data-driven values, rather than simple IO computations. It was then easy to rebuild the applicative version on top of the monadic version. * I explored why my recursive examples blow up. For instance, ++++ \begin{code} ui4 :: UI (Source ()) ui4 = mdo lo <- isliderDyn (pair (pure 0) hi) 3 hi <- isliderDyn (pair lo (pure 10)) 8 return (pure ()) pair :: Applicative f => f a -> f b -> f (a,b) pair = liftA2 (,) \end{code} === I think the problem is that both widgets try to extract a value from the other when they're created. Possible solution: bubble up delayed actions during UI creation. I could probably do it with a simple @WriterT@ monad transformer. In mtl, @WriterT@ works with arbitrary monoids, not just lists. So I don't even have to make a list of actions and then sequence them. === * NVC ++++ * I love how our Seattle Unity series is going. We had our fourth (three-hour) meeting out of six last night, and then stayed after to talk for another hour. Pretty deep. 
=== * Haskell projects / ICFP prep ++++ * Where am I now? ++++ * I like the applicative functor approach for data-driven computation and GUIs. * Dynamic sliders work in the monad version but not the arrow version. Odd. === * Where to go? ++++ * Define scope of papers and get focused on writing. * More "event" stuff. Cumulative data-driven computations. Examples. * I want to get some missing instances into the standard libraries. * Get dynamic sliders working in arrow interface. * Recursive version of MonadUI, ArrowUI. What about ApplicativeUI? Is there a standard fixpoint operator? * Do I want to re-do TV on top of an applicative interface or leave in terms of arrow? * Could I build ArrowUI on ApplicativeUI? * Remove the Phooey module and recommend that apps directly import MonadUI, ApplicativeUI, or ArrowUI. Maybe rename them Phooey/{Monad,Applicative,Arrow}.hs. === * Possible paper topics ++++ * "Interfaces for Data-Driven Computation", or "Imperative GUIs -- Phooey! Simple Functional Approaches to GUI Programming" * "Tangible Values and Composable Interfaces" * "Functional Programming by Interacting with Tangible Values" === * Made a Haskell wiki page with my current [[type composition| http://haskell.org/haskellwiki/TypeComposition]] module, asking for comments & suggestions. === * Haskell projects / ICFP prep ++++ * Here's a paper I'd love to submit: "GUIs are visualizations of pure functional values". ++++ * Currently it's more of a thesis statement than a demonstration. * [[TV]] points in this direction. * To really have a paper, demonstrate the thesis with typical and varied GUI examples. * Not just graphical interfaces, but also IO, as in [[separating IO from logic -- example]] === * Discovered a wonderful simplification of the @DataDriven@ implementation. 
++++ Reading the @Applicative@ source code, I ran across this definition: \begin{code} instance Monoid a => Applicative ((,) a) where pure x = (mempty, x) (u, f) <*> (v, x) = (u mappend v, f x) \end{code} (I asked about the motivation for this definition and learned that it corresponds to the writer monad.) Note how similar this code is to my @DataDriven@ instance: \begin{code} data DataDriven src m a = DD { current :: src a, subscribe :: m } instance (Monoid m, Applicative src) => Applicative (DataDriven src m) where pure a = DD (pure a) mempty DD srcF subF <*> DD srcX subX = DD (srcF <*> srcX) (subF mappend subX) \end{code} Aha! My @DataDriven@ definition is isomorphic to a simple composition: \begin{code} type DD src m = (,) m :.: src \end{code} I can even replace the monoid instance \begin{code} -- standard Monoid instance for Applicative applied to Monoid instance (Applicative (DataDriven src m), Monoid a) => Monoid (DataDriven src m a) where { mempty = pure mempty; mappend = (*>) } \end{code} with this more general version \begin{code} -- standard Monoid instance for Applicative applied to Monoid instance (Applicative (f :.: g), Monoid a) => Monoid ((f :.: g) a) where { mempty = pure mempty; mappend = (*>) } \end{code} Note that the @Applicative@ constraint is implied by @Applicative f@ and @Applicative g@, thanks to the @Applicative@ instance for @f :.: g@. I use the form above, because it's totally boilerplate. === === * Haskell projects / ICFP prep ++++ * Where to go from here with Phooey? I could make a simplified version with data-driven values, a monad, and an applicative functor. At first omit even flexible layout. * What about TV, which currently expects an arrow? ++++ * Can I change it to work with a functor and a cofunctor? * Would I parameterize by both a functor & cofunctor? * Or maybe use functional dependencies or [[indexed types| http://haskell.org/haskellwiki/GHC/Indexed_types]] (in GHC 6.7+). 
* For Phooey, I have a functor (the applicative functor @UI@). What's the cofunctor? ++++ I think it's @Consumer UI IO@, where \begin{code} newtype Consumer f src a = Consumer (f (a -> src ())) instance Functor f => Cofunctor (Consumer f src) where cofmap h (Consumer f) = Consumer (fmap (. h) f) \end{code} However, I can factor @Consumer@ very nicely \begin{code} instance (Functor g, Cofunctor f) => Cofunctor (g :.: f) where cofmap h (T_T gf) = T_T (fmap (cofmap h) gf) newtype Flip g b a = Flip (g a b) instance Arrow (~>) => Cofunctor (Flip (~>) b) where cofmap h (Flip f) = Flip (arr h >>> f) type Consumer f src = f :.: Flip (->) (src ()) \end{code} Here's another definition \begin{code} newtype Consumer' f src a = Consumer' (f a -> src ()) \end{code} Or, factored: \begin{code} type Consumer' f src = Flip (->) (src ()) :.: f \end{code} and that version needs the following, also compelling, definition for @Cofunctor (g :.: f)@ \begin{code} instance (Cofunctor g, Functor f) => Cofunctor (g :.: f) where cofmap h (T_T gf) = T_T (cofmap (fmap h) gf) \end{code} If I provide both instances, GHC says "Duplicate instance declarations". What to do? === === * Idea: use associated types in Eros, to eliminate the continuation passing style used in interpreting gestures. === Lately: * Getting ready to move to California (house in the woods on 20 acres). Leaving Friday April 6. * Worked quite a bit on [[Tangible Functional Programming| http://conal.net/papers/Eros]]. I put a draft and [[blogged| http://conal-elliott.blogspot.com/2007/03/icfp-07-paper-draft-comments-please.html]], which got onto Planet Haskell, [[programming reddit| http://programming.reddit.com/search?q=conal]], and [[Lambda the Ultimate| http://lambda-the-ultimate.org/node/2159]], yielding a couple thousand visitors. I asked for comments. The paper is due April 6 (moving day). 
* Just now, I put out [[Phooey| http://haskell.org/haskellwiki/Phooey]] 1.0 and [[TypeCompose| http://haskell.org/haskellwiki/TypeCompose]] 0.0. I sent Wolfgang Jeltsch a pointer and asked him to give it a try and let me know what happens. I'd told him about my new applicative functor approach, and he's now done it also in his "Vegetables" system. He's planning to submit to the Haskell Workshop, as I am. I suggested we explore options, including collaboration. I don't yet know what his contributions would be. Type the text for 'New Tiddler' * I'm reading Alan Watts's "The Wisdom of Insecurity". Some favorite passages ++++ > Human beings appear to be happy just so long as they have a future to which they can look forward--whether it be a "good time" tomorrow or an everlasting life beyond the grave. For various reasons, more and more people find it hard to believe in the latter. On the other hand, the former has the disadvantage that when this "good time" arrives, it is difficult to enjoy it to the full without some promise of more to come. If happiness always depends on something expected in the future, we are chasing a will-o'-the-wisp that ever eludes our grasp, until the future, and ourselves, vanish into the abyss of death. [p 15] > The common error of ordinary religious practice is to mistake the symbol for the reality, to look at the finger pointing the way and then to suck it for comfort rather than follow it. Religious ideas are like words--of little use, and often misleading, unless you know the concrete realities to which they refer. The word "water" is a useful means of communication amongst those who know water. The same is true of the word and the idea called "God". [p 23] > We must here make a clear distinction between belief and faith, because, in general practice, belief has come to mean a state of mind which is almost the opposite of faith. Belief, as I use the word here, is the insistence that the truth is what one would "lief" or wish it to be. 
The believer will open his mind to the truth on condition that it fits with his preconceived ideas and wishes. Faith, on the other hand, is an unreserved opening of the mind to the truth, whatever it may turn out to be. Faith has no preconceptions; it is a plunge into the unknown. Belief clings, but faith lets go. In this sense of the word, faith is the essential virtue of science, and likewise of any religion that is not self-deception. [p 24] > You can only know God through an open mind just as you can only see the sky through a clear window. You will not see the sky if you have covered the glass with blue paint. [p. 25] > What religion calls the vision of God is found in giving up any belief in the idea of God. [p. 27] === * My applicative functor formulation of [[Phooey]] points to an idea for changing TV. Instead of an Input Functor and an Output cofunctor, have just Input. Define @Output a = Input (Sink a)@, perhaps generalizing Sink to some other cofunctor or comonad. Note that a functor & cofunctor composed yield a cofunctor, as expected. Problem: I represent Output as a (generalized) algebraic data type so I can transform it via the DeepArrow combinators. * [[WiPeer| http://www.wipeer.com]] is free software that enables wi-fi computers to connect without a router. * I've been side-tracked from software work by the Murray Creek move and settling in. What might I work on now? ++++ * Paper for Haskell Workshop. About what? ++++ * Phooey and data-driven computation. * TV for a variety of "interface" arrows. === * Think more about the Zipper. Could it be an instance of my favorite Wand paper ("Continuation-Based Program Transformation Strategies", I think JACM 1980)? What happens with more static typing, as in a statically typed object language? * Eliminate the complicated continuations in my Eros implementation. Explore using associated types instead. * More generally, work on refactoring Eros and preparing for release. * Get Pajama re-factored and released. 
* Misc new projects ++++ * Get going on something 3D, especially with gloves and head-tracking head-mounted display in mind. I'll be getting the P5 gloves soon. * Music/sound synthesis === === * Some links: ++++ * Found [[Windows keyboard shortcuts| http://www.smartcomputing.com/editorial/article.asp?article=articles/archive/l0811/12l11/12l11.asp]] * [[clipmarks|http://www.clipmarks.com]]: Social web content clipping. * [[Genuine Curiosity| http://www.genuinecuriosity.com]] === * Responded to [[Eros paper comments from Kragen]] * Idea: incremental web page viewing. When I visit a web page, the server sends me a delta, i.e., a description of an edit that brings my copy up to date. Ditto for any mutable data on the internet. Could such a thing be rigged up from a version control system? * In general, design web sites and services with streaming in mind, rather than a single block of data. Consider these examples: blog, tiddlywiki, catalog. Hm. Make this description clearer, since (I think) HTML already streams into a web browser. * Describing the Koch curve (inspired by [[A quick diversion| http://calculist.blogspot.com/2007/04/quick-diversion.html]]). Some thoughts ++++ Use a single representation for all levels of detail. Have a constructor that provides an approximation and an exact version. For the Koch curve, the approximation will be a line segment, and the exact version will be nine transformed versions of the whole. The transformations are (a) uniform scale (by 1/5 for the Koch curve), (b) turn left, (c) turn right, and translate (to the end of the canonical line segment). Provide a render function that turns an abstract curve description into a stream of drawing commands, given a depth or epsilon. 
\begin{code} data Figure = LineSeg | Xfo Transform Figure | Figs [Figure] | Approx Figure Figure fractal :: [Transform] -> Figure fractal xfos = frac where frac = Approx LineSeg (Figs [Xfo xfo frac | xfo <- xfos]) \end{code} === * I'm reading Alan Watts's book "The Wisdom of Insecurity". The following passage touches the heart of what I wanted to get at in my post "Trust that ..." ++++> We must here make a clear distinction between belief and faith, because, in general practice, belief has come to mean a state of mind which is almost the opposite of faith. Belief, as I use the word here, is the insistence that the truth is what one would "lief" or wish it to be. The believer will open his mind to the truth on condition that it fits with his preconceived ideas and wishes. Faith, on the other hand, is an unreserved opening of the mind to the truth, whatever it may turn out to be. Faith has no preconceptions; it is a plunge into the unknown. Belief clings, but faith lets go. === What Watts calls "faith", one might also call "Trust", and what he calls "belief", one might call "trust that ...". In this sense, the need for "Trust" does not depend on someone acting a certain way and so is re-aligned with what we call "needs" in NVC. Type the text for 'New Tiddler' * Called Starband: 888-424-4121 (option 4 for "maintenance event"). Notes: ++++ * We have a "SkyEdge" modem. * For modem/dish info, visit 192.168.1.1. Oops -- IP conflict with the router. Had to direct-connect for now. Click on "telemetry" and confirmed that the second number was 7ish dB rather than 5 or lower. * Started using testmy.net for speed test. * Ron noticed that we're in the bandwidth penalty box. * Info on our limit: ++++ * Limit: 976.6mB in a 7 day window. When hit, we get throttled until our 7 day window bandwidth is 683.6mB. * History: May 5: 173mB, 6: 381mB, 7,8,9 negligible, 10: 419, 11: 362. * To track our usage: sign in to starband.net, click "my accounts", then link at page bottom to usage graph. 
=== === * Tracking bandwidth usage with the [[Starband monitor| http://www.starband.net/monitor/viewgraph.aspx?gp=1]]. I suspect that bandwidth gets eaten even when both computers are suspended. Try again. 11:23:58, 16.9MB. Suspending. * A while back, Wolfgang J pointed out an inefficiency of the UI arrow in Phooey, related to automatic splitting and merging of pair-valued sources. His solution is to keep sources explicit. Another idea is to use type functions to avoid having to cram multiple sources into one. Similar to Manuel Ch's trick of efficient array representations. Catch: if only some types can be handled, then I can't strictly implement the arrow interface. * Reimplemented Phooey's applicative functor (AF) interface very simply, using type composition. ++++ \begin{code} type Extractor = IO type Notifier = IO () -> IO () type DataDriven nfr xtr = (,) nfr O xtr type Source = DataDriven Notifier Extractor type UI = (->) Win O IO O (,) Layout O Source \end{code} I'm not sure the abstraction is worth it here. Alternatively, \begin{code} type Source = (,) Notifier O Extractor type Source = (,) (IO () -> IO ()) O IO \end{code} All in one: \begin{code} type UI = (->) Win O IO O (,) Layout O (,) (IO () -> IO ()) O IO \end{code} === * Talked to Paul at OmniVision and with Joseph. We're going to get mom set up with new modem & service rather than moving the old. I got the process started. For now, I set it all up in my name and gave my EJ visa debit card number for the $600 installation.  Call Paul before Friday if I want to tweak anything about billing.  One glitch: I gave him mom's phone number, and I really want mine to be the contact.
* Notes from Jim Lee's sermon on May 6 at Christ Unity church ++++
*  "Despite appearances to the contrary ...".  Shoot for more: see how the appearances are in fact consistent.
*  Jesus spoke clearly & compellingly.  Idea: form a group for developing clear & compelling expressions of what each member cares about.  Embrace and leverage diverse orientations.
*  See the condition of "unforgiveness" as Right/Wrong thinking.  The cure is not to "forgive" a wrong, but rather to let go of one's belief in the notion of wrong.
*  Heaven is a verb: to rise, to elevate one's perspective.  Heaven isn't "up there"; it's orienting myself in an upward direction, energetically.
===
* I got my ICFP reviews back today, beginning the 48-hour response period.  I'm satisfied with the reviewers' understanding, so I won't respond.
* I thought I'd have a Haskell Workshop submission on Phooey, but maybe there's not enough to say.  I love the elegance of the formulation of data-driven computation and of the UI applicative functor.  I'm satisfied with the generalization to flexible layout.  But what else is there to say?  Maybe write a short series of blog posts instead.
* I'm considering making a new Phooey release, without the arrow or monad versions.  Sweet and simple.  Is there a loss?  Maybe do a new spinoff project instead, and leave Phooey as is.  The new system is about "Applicative GUI programming", so
* Lennart Kolmodin has a Haskell library, "[[inotify| http://haskell.org/~kolmodin/code/hinotify]]".   "inotify provides file system event notification, simply add a watcher to a file or directory and get an event when it is accessed or modified."  Sounds like a good fit for my simple data-driven evaluation library (in [[TypeCompose| http://www.haskell.org/haskellwiki/TypeCompose]]).  Probably Linux only.
* What kind of tools exist for (push-based) notification of changes over the web, e.g., a web page or database element?  I did some searching and came up with a paper: [[Engineering Push-Based Web Services| http://citeseer.ist.psu.edu/brenna06engineering.html]].
* Thought on notifiers (news/subscription services).  Consider |a+a| for a source |a|.  The notifier would be |na mappend na| for a notifier |na|.  In my current representation, any subscribing action would get invoked twice.  Try another representation that eliminates redundant invocation.  Could have a map from unique (probably integer) tags to simple notifiers: ++++
\begin{code}
type Notifier = Map Int (IO () -> IO ())
\end{code}
===
Type the text for 'New Tiddler'
* Ideas to pursue ++++
*  In DeepArrow (and its uses), |fmap| generalizes |result| and |second|.  Can I replace them?
*  Express thesis of "functional interfaces" work
*  Explore applications of indexed types ++++
*   Linear maps (derivatives)
*   Eros GUI construction
===
===
* Put up a paper draft with wiki page [[Applicative data-driven programming| http://www.haskell.org/haskellwiki/Talk:Applicative_data-driven_programming]].
* Wolfgang Jeltsch suggested that the redundant notification problem cannot really arise with UIs.  I think he's right and that the AF interface imposes a linearity on source use.  I wonder if linearity can be exploited in similar applications, say to enable destructive update.
* Sometimes I sense resistance from someone when I shift my focus from their strategy to explore needs.  What's going on?  Perhaps they are afraid that I will push for them accepting a strategy they're not as happy with.  I could express my hunch about their worry.  If the hunch checks out, then I may want to offer reassurance by explaining my intention.  Or maybe explain the intention up front, as soon as I'm uncomfortable with supporting their strategy.  What is this intention?  It's to come to a solution that we're both completely happy with, and in particular that they like at least as well as their current strategy.
* I want to get back into some software projects & dreams. ++++
*  Revisit Pajama and improve it. ++++
*   Make the controls easier to discover.
*   Build a commerce app that lets people generate and order hi-res renderings of their Pajama toys.
===
*  Eros ++++
*   Reimplement on top of new Phooey and maybe a new TV based on AFs.
===
*  [[self-organizing FAQs]]
*  [[HMD-controlled page turning| http://www.3dvisor.com/forum/viewtopic.php?p=3149#3149]]
*  Simple learning project: make a Firefox plugin that shows my remaining Starband bandwidth.  Would the logon be a problem?
===
* Yesterday, Holly & I visited the Unitarian Universalist church in Sonora.  While it's not what we're looking for in a church experience (spirit/mystery, great music, youthful energy), there is a lot of interest there in social action.   I phoned the minister (Craig Scott) last night, and we set up a meeting for today.  I want to help shift from the opposition (right/wrong) thinking & energy so people can engage in connecting and //effective// dialogs.

* Now that I've simplified Phooey to use AFs instead of arrows (or monads), how to re-do TV? ++++
Currently, @Input@ and @Output@ are indexed by an arrow type "~>", and I have constructors ++++
\begin{code}
IPrim :: (() ~> a) -> Input  (~>) a
OPrim :: (a ~> ()) -> Output (~>) a
\end{code}
===
Instead, I might index by an AF @f@. ++++
\begin{code}
IPrim :: f a -> Input f a
OPrim :: f (a -> IO()) -> Output f a
\end{code}
===
Alternatively, add a cofunctor argument @cf@, as well, for @Output@. ++++
\begin{code}
OPrim :: cf a -> Output f cf a
\end{code}
===
Or relate @cf@ as an indexed type. ++++
\begin{code}
OPrim :: Co f a -> Output f a
\end{code}
===
Can I eliminate the @Input@/@Output@ distinction and say that everything is an input?  Rename "Input" to "Get". Whence @OLambda@? ++++
\begin{code}
Lambda :: Get f a -> Get f (Co f b) -> Get f (Co f (a->b))
\end{code}
===
\begin{code}
OPair :: Output (~>) a -> Output (~>) b -> Output (~>) (a,b)
\end{code}
===
New: ++++
\begin{code}
OPair :: Get f (Co f a) -> Get f (Co f b) -> Get f (Co f (a,b))
\end{code}
===
In contrast, ++++
\begin{code}
IPair :: Get f a -> Get f b -> Get f (a,b)
\end{code}
===
Can I reconcile these two types?  Only if @Co f (a,b) == (Co f a, Co f b)@.  Note the resemblance with Manuel Ch's efficient array representation, which motivated the indexed type extension to GHC.  Not quite, though, since the structure of @f@ isn't being used here.
===
* Is there a useful notion of "applicative //co-//functor"?  I think it would include something like ++++
\begin{code}
liftC2 :: (c -> (a,b)) -> cof a -> cof b -> cof c
\end{code}
which can be defined via the simpler @cozip@ (and @comap@):
\begin{code}
cozip :: cof a -> cof b -> cof (a,b)
\end{code}
===
@cozip@ is just what I need for presenting output pairs (@OPair@) in TV.  Not sure it makes sense to call this functionality "applicative".
* What about @OLambda i o :: Output f cof (a -> b) @?  Since, @accept i :: f a@ and @present o :: cof b@, combine the results with a new function @lam :: f a -> cof b -> cof (a->b)@.  Maybe @lam@ is a method of a new type class.  Here's a related arrow function, used in TV. ++++
\begin{code}
lam :: Arrow (~>) => (() ~> a) -> (b ~> o) -> ((a->b) ~> o)
lam ia ob =
arr (\ f -> (f, ()))  >>>
second ia             >>>
arr (uncurry ($)) >>> ob \end{code} === The AF version is simpler: ++++ \begin{code} lam :: Applicative f => f a -> f (b -> o) -> f ((a->b) -> o) lam = liftA2 (\ a snkb -> \ f -> snkb (f a)) \end{code} === * The version of @lam@ above is probably not what I want for @IO@. Instead, ++++ \begin{code} lamIO :: IO a -> (b -> IO ()) -> ((a->b) -> IO ()) lamIO ioa oib = \ f -> ioa >>= oib . f \end{code} === * I've started corresponding with Shahbaz Chaudhary about AFs and algorithmic stock trading. Some thoughts: ++++ * Use an AF of data streams, starting with stock market data. * Define additional streams, derived from the market data, with things like running aggregates (extrema, sums, averages, etc). Do as much as possible with just AF operations. See what else we need. * Make a data type of (buy/sell) actions and form action streams via @pure@ & @<*>@ (& @<$>@ & @liftAn@) over data streams.
*  Perhaps use & extend my "DataDriven" library to represent the data streams.
*  Extensions would include scans and filters.
*  Given the huge amount of data, strictness is probably very important, particularly for scans.
*  If we need a performance boost, consider compilation strategies.  We could simply use GHC to compile the Haskell code, perhaps along with some rewrite rules (fusion etc), or maybe something like the Pan compiler.
===
* Continuing yesterday's ponderings for updating TV to AFs. ++++
*  The type of @lam@ wasn't quite ideal for @IO@.  I could go back to an explicit cofunctor, as on [[2007-06-05]]. ++++
\begin{code}
lam :: (Applicative f, Cof f cof) => f a -> cof b -> cof (a->b)
\end{code}
===
For @f = IO@, I can use @cof = Flip (->) (IO ())@.  What is in the @Cof@ class?  Some way to combine @f@ and @cof@.  Look at @lam@ for @IO@. ++++
\begin{code}
lamIO :: IO a -> Flip (->) (IO o) b -> Flip (->) (IO o) (a->b)
lamIO ioa (Flip oib) = Flip (\ f -> ioa >>= oib . f)
\end{code}
===
Maybe @lam@ //is// the method for @Co@. ++++
\begin{code}
class Cof f cof where lam :: f a -> cof b -> cof (a->b)
\end{code}
===
===
In generalizing TV to work with monads and applicative functors, in addition to arrows, I'm playing with abstract classes one might call "Pair" and "Lambda".  I'd like to know if anyone has seen these classes and/or may have some ideas for uses & theoretical connections.

The classes:
\begin{code}
class Pair cof where
pair :: cof a -> cof b -> cof (a,b)

class Lambda f cof where
lambda :: f a -> cof b -> cof (a->b)
\end{code}
The reason for the names "f" and "cof" is that typically, @f@ is a functor and @cof@ is a cofunctor.  The @pair@ method makes a consumer of pairs from two consumers.  The @lambda@ method makes a consumer of functions from a producer of arguments and a consumer of results.
* Becky will be moving to Marlenton, WV next month.  I offered to drive with her and give her my car.  Today she called and said that sounds fun to her and she'd like to do it.  Her orientation is in Atlanta, starting on July 24 and going three or four days.  We'll visit Jake on the way.  Perhaps I'll fly back from Atlanta.  Becky suggested that we leave around the 14th.  The driving trip overlaps the WG2.8 meeting in Iceland.  I sent a note to Fosshotel about canceling.  I'll tell Simon PJ also.  I was getting disinclined to go anyway, as Iceland wasn't calling to me, and I don't like long, cramped flights.
* Batteries shipped.
* Bear (Barak) called.  He'll work with us on a vision phase of the project, rather than diving into specific plans.  He bills at $75/hr. We made an appointment for Friday June 30 at 10am. * I want to simplify the way UIs are constructed in Eros. Static typing got in the way of a straightforward implementation. I want to try again, using indexed types. ++++ Overall plan ++++ * Recursively process outputs (visualizers) to make GUIs. * Pass down and accumulate a description of what to do when the user chooses a sub-output. Includes the overall TV and "path" ("transformation embedder", i.e., composition of first, second, result). === Here's a first cut at some code (adapted from Eros). \begin{code} wPutPut :: WPut a -> WPut b -> WPut (a,b) wPutPut (WPut wpa) (WPut wpb) = WPut wp where wp tv path c ty = withPanel$ \ pan ->
do lty <- handleArg c pan ty tv path vfill
(la,oiA) <- wpa tv (path . first ) c tya pan
(lb,oiB) <- wpb tv (path . second) c tyb pan
return ( row 0 [la,lty,lb]
, \ (a,b) -> oiA a >> oiB b )
where
(tya,tyb) = dsPairTy ty
\end{code}
In |handleArg|, extract (from state in |c|) the arrow value |arr| to be applied deeply and make a new TV |tv' = path arr  tv|.   The puzzle is how to describe the types of |tv|, |arr|, |path|, and |tv'|.
First try a type class with functional dependencies.
\begin{code}
class Path tv path arr tv' | tv path arr -> tv' where
xform :: tv -> path -> arr -> Maybe tv'

instance Path tv (forall a. a->a)
\end{code}
===
* New Eros ++++
*  For an |Output a|, put up a GUI, //and// return an "event" (can be subscribed to) whose occurrence data includes a type representation and a path (transformation embedder).  For an |OPair|, tweak the two events by composing |first| or |second| with the path, and then merge the two results into a single event.  Also merge in the event for picking the pair directly.  Another way to think of the occurrence data is as a function from a type-tagged-arrow to a possible new arrow.
\begin{code}
data Typed  f    = forall a   . Typed  (Ty a) (f a)
data Typed2 (~>) = forall a a'. Typed2 (Ty a) (Ty a') (a ~> a')

type Lifter (~>) a = Typed2 (~>) -> Maybe (Typed ((~>) a))
\end{code}
===
* Corresponding with David Duke (Leeds) about functional scene modeling, AFs, and Vertigo.
* Haskell note: "Wanted: tricks for conflicting instances". +++>
In playing with compositions involving functors & cofunctors, I've run into a conflict in the instances I want to provide.  I imagine this sort of problem is well-known, and I'd like to hear what kinds of strategies people apply.

Here's a definition of type composition:
\begin{code}
newtype O g f a = O { unO :: g (f a) }
\end{code}
and a cofunctor class:
\begin{code}
class Cofunctor cof where
cofmap :: (a -> b) -> (cof b -> cof a)
\end{code}
We can compose functors to get a functor, cofunctors to get a functor, and functor & cofunctor in either order to get a cofunctor.
\begin{code}
instance (Functor g, Functor f) => Functor (O g f) where
fmap h (O gf) = O (fmap (fmap h) gf)

instance (Cofunctor g, Functor f) => Cofunctor (O g f) where
cofmap h (O gf) = O (cofmap (fmap h) gf)

instance (Functor g, Cofunctor f) => Cofunctor (O g f) where
cofmap h (O gf) = O (fmap (cofmap h) gf)

instance (Cofunctor g, Cofunctor f) => Functor (O g f) where
fmap h (O gf) = O (cofmap (cofmap h) gf)
\end{code}
I've wanted all four of those instances.  The problem is that instance selection (in GHC at least) ignores the contexts.  Without context, the first and fourth instances conflict, as do the second and third.  Thus I statically choose two of the four rules and comment out the other two.  Depending on my application, sometimes I like my choices, and sometimes I don't.

Are there work-arounds to get the flexibility I want out of GHC?

Is it plausible to improve instance selection to use contexts?  I imagine doing so would require some kind of backtracking search.

Thanks,  - Conal
===
* Here's a simple approach to gestural composition.  During GUI construction, produce a path-valued event.  For selecting the current GUI, the path is empty (identity function).  For a composite GUI, additionally merge in fmap'd versions of the sub-GUIs, adding on a first, second, or result.  Very simple idea.  Work out the typing. +++
\begin{code}
type Win = Panel ()

type OI a = a -> IO ()

type WinIO a = Win -> IO (Layout,a)

type WPut a = Ty a -> WinIO (OI a, Event Path)
\end{code}
How to define the |Path| type?  What is its relationship in the definition of |WPut| to the type parameter |a|?  In |wPutPut|, we can compose |first| and |second| only because of the pair type of the |WPut| being constructed.  At every level, the path must be applicable to the GUI type.  More precisely, it must have type |a ~> a'|, for some type |a'| and every deep arrow.  I think we'll need the type a' for use with |WPut|.
\begin{code}
data DeepFrom a =
forall a'. DeepFrom (Ty a') (forall (~>). DeepArrow (~>) => a ~> a')
\end{code}
I don't think the deep arrow universality is critical.  It could instead be a type parameter:
\begin{code}
data DeepFrom' (~>) a = forall a'. DeepFrom' (Ty a') (a ~> a')

data TvFunFrom a = DeepFrom' TvFun
\end{code}
Still, I like the universality, so I'll keep it around.
\begin{code}
type WPut a = Ty a -> WinIO (OI a, Event (DeepFrom a))
\end{code}
No, that's not it.  I want not an arrow but a function that takes a "seed" arrow (probably with type annotations) and yields an optional new arrow from |a|.  Optional, because the given seed arrow may have the wrong source type.

Oh -- remember that I want early feedback about whether the current seed arrow is applicable to a sub-gui.  When the seed is chosen, I'd like all compatibly-typed GUIs to highlight.  That could be easy.  During GUI construction, pass in a seed-change event.  Each sub-gui handle adds a subscriber that checks the source type and either enables the sub-gui for composition and highlights it, or un-highlights and disables for composition.  Oh -- then each sub-gui could store an optional compatibly-typed current seed arrow (and result type).  In that case, maybe the events don't have to return the arrow-transforming-function, but rather a transformed arrow, with result type.
\begin{code}
idTD :: DeepArrow (~>) => Ty a -> Typed ((~>) a)
idTD tya = Typed tya idA

firstTD :: Arrow (~>) => Ty b -> Typed ((~>) a) -> Typed ((~>) (a,b))
firstTD tyb (Typed tya' a_a') = Typed (pairTy tya' tyb) (first a_a')

secondTD :: Arrow (~>) => Ty a -> Typed ((~>) b) -> Typed ((~>) (a,b))
secondTD tya (Typed tyb' b_b') = Typed (pairTy tya tyb') (second b_b')

resultTD :: DeepArrow (~>) => Ty a -> Typed ((~>) b) -> Typed ((~>) (a->b))
resultTD tya (Typed tyb' b_b') = Typed (funTy tya tyb') (result b_b')
\end{code}
Also, an event signaling the presence of a new seed arrow, tagged by source and result types.  And one signaling an arrow being directed at a given output.
\begin{code}
type NewSeed (~>) = Event (Typed2 (~>))
type Transforming (~>) a = Event (Typed ((~>) a))
\end{code}
Rendering an output requires the new-seed event, the output type and a place to put widget.  It yields an event saying that an arrow is being directed at the output, along with a consumer for output values.
\begin{code}
-- | Render an output: given the new-seed event and the output's type,
-- yield (in WinIO) a consumer of output values together with an event of
-- arrows being directed at this output.
type WPut (~>) a = NewSeed (~>) -> Ty a -> WinIO (OI a, Transforming (~>) a)
\end{code}
With these definitions, the following code type-checks:
\begin{code}
-- | Combine renderers for two outputs into a renderer for a pair output.
-- NOTE(review): with @type WPut (~>) a = NewSeed (~>) -> Ty a -> ...@ as
-- defined above, 'WPut' takes the arrow type as its first parameter, so the
-- signature must carry @(~>)@ through; the unparameterized form
-- @WPut a -> WPut b -> WPut (a,b)@ did not match that definition.
wPutPut :: WPut (~>) a -> WPut (~>) b -> WPut (~>) (a,b)
wPutPut wpa wpb = wp
where
wp newSeed tyab = withPanel \ pan -> do (lab,evab) <- handleArg newSeed tyab pan (la,(oia,eva)) <- wpa newSeed tya pan (lb,(oib,evb)) <- wpb newSeed tyb pan return ( row 0 [la,lab,lb] , ( \ (a,b) -> oia a >> oib b , evab mappend fmap (firstTD tyb) eva mappend fmap (secondTD tya) evb ) ) where (tya,tyb) = dsPairTy tyab \end{code} === * Continuing with my Eros re-design from [[2007-06-14]] ++++ * Automate the layout accumulation via pair AF: @(,) Layout@ * Ditto for event accumulation, after re-ordering the inner pair: @(,) (Transforming (~>) a)@ * Automate the layering via type composition: @(,) Layout O (,) (Transforming (~>) a)@ * The @fmap@-s get applied to the accumulated events, so type composition won't automatically route it where I want. * Consistently rearrange so that the pair piece comes between the first and second. Then the final assembly can sequence them all at once. * Tweak @oia@ and @oib@ into @(a,b)@-consumers (each discarding half). Then the consumer construction is just @mappend@. === * Reply to Moreah ++++> > From a conversation with Elana, my understanding is that she and maybe other TC members have some concerns around ITC not choosing to be more connected with the business of running NCC --- materials, fees, web site, mail, events, library, book sales, answering the phone, etc. From the March 15 NCC TC proposal: > I propose that that the Training Circle is composed of Certified CNVC Trainers who want to work together towards the NCC and CNVC mission. > ... > For simplicity, clarity, and integrity, I would like to suggest that NCC exists to support and promote local and visitng CNVC Certified Trainers, trainings and materials. Given these statements of inclusion/exclusion and of purpose, I don't know why non-cnvc-certified folks would want to invest their life energies in "the business of running NCC". I'd expect them instead to invest in personal and community efforts to support and promote their own contribution and learning. 
> I SEE the value for ITC of learning and connecting, AND if we want to have the benefits of NCC, now or later, we probably need to consider what we’re contributing to the organization. I'm guessing there's more reward/punishment motivation here than hungry-duck-feeding joy. Am I interpreting accurately? As John Buck taught us, a key part of the sociocratic process (and of Life!) is feedback. From what I could tell, the TC formation did not follow the spirit and intent of sociocracy. Specifically, I didn't see misgivings ("niggles") about exclusion nurtured more effectively, by giving all the time it took to find strategies that would meet all needs fully. Given what is, there's still a simple, clean and powerful feedback process available: invest your energy in what serves you and withdraw it from what doesn't serve you. NCC business will be supported or not, and the feedback on NCC purpose and TC membership will be received. Why am I taking the time to write this note? I want to see everyone apply their life energy in ways that fulfill their hearts' desires, free of old habits like reward/punishment motivation. Also, I am deeply inspired with the potential of the Life (vs control) paradigm, and I'd like to see progress in realizing that potential. === * Corresponding with [[Jacob Gotwais| http://humanpotentialarts.com]]. ++++> I like your TV-vs-NVC distinction very much. To paraphrase, by & large, people in our culture are much more familiar with television than with NVC. For that reason they're better equipped to rate presenters of television (TV sets) than presenters of NVC. Am I tracking your meaning? I'd like to start with your question > ... when I get my NVC, how do I know whether it is really NVC? Can anyone really say what NVC //is//? Under the surface, is question even meaningful? Perhaps the question itself rests on an assumption that is at odds with the essence of NVC, namely the assumption that NVC //is// anything at all. 
I see all Life (people etc) as process, and so I don't want to use "is" language about living beings. I see NVC the same way -- alive & evolving. I mean that historically, when I think of NVC as meaning MBR's personal mental framework & spiritual practice, as they have been evolving for decades. And I mean it for myself and for you as individuals, as well as the collectively evolving understandings and practices. NVC has never been a static thing, thank goodness! I have heard (top-down) certification described as a strategy to preserve the integrity of NVC or of "the NVC process" or of "the NVC teachings". Not only do I see this strategy failing (considering some things I've heard from people certified by CNVC), I see an inherent contradiction in the goal itself, aside from any specific strategy. The contradiction is that, for me, NVC is all about serving Life, and Life isn't into the preserve & protect (or embalm & encase) paradigm. Rather, Life is into adaptation, evolution and (bottom-up) emergent behavior. > ... In other words I'm concerned that a need > that may not be met without any trusted certification/training processes > in place is the integrity of NVC itself. Consider for yourself: could "the integrity of NVC itself" really be a need? If you mean the current theory and practice of NVC (say, as expressed in MBR's book and other sources), then surely you're talking about a mere strategy, right? If you mean something deeper that underlies and transcends current theory and practice, then I doubt that the certification process is measuring it. If we cannot say what NVC "is", where can we go, and how can we talk about "teaching NVC", "practicing NVC", etc? Must we toss out the baby with the bath water (or toss out [[the sacred place with the raft| http://evolve.awakeningcompassion.com/?p=12]])? Instead, I'd like to shift attention to the underlying intention, i.e., shift from strategy to needs. Instead of asking questions like "Is it NVC?" 
or "Is it good NVC?" (external evaluations), I'd like to ask people questions like the following. * Did you learn about habits of yours that inhibit compassion in yourself and others? * Did you get some new perspectives and tools to help you stay connected to the humanity in yourself and in others? * Do you trust the leader's personal authenticity in relating to you and others? * Have you been able to more often find your way toward agreements that endure and support goodwill? * When you apply what you learned, do you feel more peaceful & joyful? Or replace these questions with ones that better capture the essence of what NVC is striving for, in its essence. My desire is to give everyone a means of steering their personal practice of connection and the continuing evolution and practice of NVC. (This steering may enable the "learning community" I hinted at in Emergence.) Over time, the details of current practice may be very different from what they are today, just as today's are very different from 30 years ago. If historians want to know what NVC practice used to be, let them read the old books and watch the old video recordings. Well, there you go. I'd love to hear your response. I'll be away from tomorrow (Thursday) morning through Monday night. Maybe some internet connection and maybe not. Thanks for the dialog. Warmly, - Conal > On another note -- I have been thinking of the ideas on emergence. Here's > one thought that keeps coming up for me. Choosing an NVC trainer is a > little different than buying a TV. I'm grateful for services like Amazon > that let me browse TV vendors and products and see people's ratings and > the ratings of the ratings (actually I don't own a TV but if I did I'd > feel grateful). When I get the TV, I will have no doubt that it is a TV > and I can submit my own rating. It seems to me that NVC is different in > that when I get my NVC, how do I know whether it is really NVC? 
When I > first read Marshall's book, I thought I knew what NVC was. 7 years later > I have a totally different idea of what NVC is, and I don't think it would > have been possible to explain to me 7 years ago what I know now - I had to > grow into it. What if everyone gets something that's not really NVC, they > all love it and submit great ratings, leading more people to buy it and > submit more great ratings, etc? In other words I'm concerned that a need > that may not be met without any trusted certification/training processes > in place is the integrity of NVC itself. Anything you'd like to share > about what comes up for you, reading that? > Jake === * NVC presentation blurb for Tuolomne UU. ++++ Fighting the "Good Fight" is Still Fighting: Peaceful, Effective Alternatives, Parts I and II Sundays, August 12th and 26th, 10:30 a.m. Presented by Conal Elliott and Holly Croydon (www.AwakeningCompassion.com) Join us for a two-part introduction to the consciousness and practice of Compassionate Communication (also called Nonviolent Communication or NVC), where you will learn to * communicate your concerns in such a way that people really want to hear them, instead of getting turned off and defensive; * hear the heart of others' concerns and stay connected with their humanity, even when you hear blame, criticism and judgment; and * nurture peace in your life, work, and world. === * Eros ++++ * Consider how to handle some polymorphism in the new Eros. In particular, the "Tweak" menu. Instead of passing down a (monomorphic) "seed" arrow, I could pass a function @mkSeed :: Ty a -> Maybe (Typed (~>) a)@. I already have a way to turn an arbitrary arrow value (fully type-tagged) into a @mkSeed@. * Of course I'd like polymorphic functions as well. * Remember, after composition, I want the old input values to be transferred to the new hybrid. 
* When rendering an input, generate two events: one means "this input chosen" and has no data; the other means some sub-input was chosen and has the types of the chosen sub-input and residual as data, together with the input extractor. === * Imagine partitioning computation between (a) remote shared server (not under my control), (b) local machine, and (c) an intermediary server. Assume that (c) has a high-bandwidth connection to (a), while (b) has low-bandwidth connection to both (a) and (c). Make sure data reduction happens on (b) and data expansion on (a). Example: extracting/filtering data (reduction), followed by rendering (expansion). * Consider a monoid of partial values: @mempty = undefined@, and @mappend = lub@. Would work nicely for WGet, but I don't think lub can be implemented. Why not? It's deterministic and information-monotonic. I fooled around and came up with the idea of using endomorphisms. ++++ From {{{Monoid.hs}}}: \begin{code} -- | The monoid of endomorphisms under composition. newtype Endo a = Endo { appEndo :: a -> a } instance Monoid (Endo a) where mempty = Endo id Endo f mappend Endo g = Endo (f . g) \end{code} For my use, the idea is that later endos (@g@) replace information from earlier endos (@f@). Here's a little library of operations on partial values. \begin{code} type Partial = Endo -- a monoid inP :: ((a->a) -> (a'->a')) -> (Partial a -> Partial a') inP f = Endo . f . appEndo valp :: c -> Partial c valp c = Endo (const c) pval :: Partial c -> c pval (Endo f) = f undefined unFst :: Partial a -> Partial (a,b) unFst = inP first unSnd :: Partial b -> Partial (a,b) unSnd = inP second unElt :: Functor f => Partial a -> Partial (f a) unElt = inP fmap \end{code} === * Idea: have source/event subscription return an unsubscribe action. 
++++ \begin{code} -- Old: data Event a = Event ((a -> IO ()) -> IO ()) instance Functor Event where fmap f = inE (cofmapC (cofmapC f)) -- New: data Event a = Event ((a -> IO ()) -> IO (IO ())) \end{code} Magic: the new @Event@ is still a monoid. Unsubscribers are automatically combined. === * Eros implementation musings ++++ * To implement @wGetPut@ elegantly, I want a @Source a -> OI (a->b)@ and a @OI b -> OI (a->b)@. Then cascading monoid magic cause the two @OI (a->b)@ values to get @mappend@-ed into a single @OI@. * When I get a function, one action is to apply it to the current @a@ value and then consume the resulting @b@. Another action is to save the function in a variable for use when the input changes. * How to separate? * Supppose I treat the @WPut b@ as a @WGet (OI b)@. Then I have a @WGet a@ and @WGet (OI b)@ and need to make a @WGet (OI (a->b))@. No problem, if @WGet@ is an AF, but I don't know how it could be. * Idea: Reverse the embedding: model @WGet a@ as @WPut (OI (OI a))@ (or @WPut (Event a)@) * Oh -- here's the idea I'm looking for. Combine a //source// of consumers (not just a consumer) with a source of values to get a source of function consumers.++++ \begin{code} lam :: a -> (b -> o) -> ((a->b) -> o) lam a bo f = bo (f a) lamA :: Applicative f => f a -> f (b -> o) -> f ((a->b) -> o) lamA = liftA2 lam \end{code} === I like this change of introducing //sources// of consumers. I think it would allow me to use a weaker output-only, stateless replacement for @IO@, such as @Image@. A //source// of visualizations explains why we have more than one visualization. //Interactive// visualization. * Oh, hey. This clearer model may also show me how to retain input values from old TVs to new compositions. * There's a problem with @lamA@: I don't know how to make @WGet@ be an AF, since it (a) takes a @Ty a@ argument and accumulates an @a@-dependent event. I could instead use the @Lam@ class I came up with recently when fooling around with Phooey. 
Here's version for values that have to be told their types. \begin{code} type T f a = Ty a -> f a lamTA :: Applicative f => T f a -> T f (b -> o) -> T f ((a->b) -> o) lamTA fa fbo abo = lamA (fa a) (fbo bo) where (ab,o) = dsFunTy abo (a ,b) = dsFunTy ab bo = funTy b o \end{code} === * Imagine a delta-oriented file system and editor. Undo, efficient every-version archiving. * From an email exchange with Erika ++++> I don't understand how it could be a "need" of yours to "respect their request". And I don't see "honoring" someone's strategy as in alignment with the Need-centered consciousness of NVC. In my understanding of NVC consciousness, requests/strategies are utterly dispensable, while needs are essential. This principle is for me the central & most powerful insight of NVC. Not only do I not see how to reconcile "respecting a request" or "honoring a strategy" with NVC (and more importantly, with my heart and reason), I don't know what these phrases could really mean. Big red flags go up in me when I hear phrases like these, and I suspect these phrases are hiding underlying cultural programming and unconscious assumptions. I greately admire Marshall's careful reflection on and reshaping of language, to illuminate and help shift out of unconscious cultural programming. It's a gift I appreciate in myself as well. I want to see the gift carried further. There's so much weighty momentum behind language habits, and I want to see us all wake up and make powerful, self-connected conscious choices about the world we're creating. BTW, I'm with you about the name "Nonviolent Communication". "Compassionate" works a bit better for me, but it's still not as descriptive as I'd like. I like "life-serving" better yet, for describing the goal, though not at all the method, and people have many conflicting strategies for how to "serve life". I think it's highly unlikely that any terse & descriptive exists in our language. And for good reason: we speak a language of domination. 
If & when our culture tips over to one in harmony with Life, then eventually the language will evolve to a language that fits it. Or, more likely, the language and cultural thought patterns will co-evolve. Maybe it's that tight linking (see http://en.wikipedia.org/wiki/Sapir-Whorf_hypothesis) that make cultural shift so difficult (in other words, that make culture so stable). Language and thought are like two legs, shackled together at the ankles. Neither can move much without the other, and so we go stumbling along. On the other hand, this sort of "negative feedback" (each component resisting change in the other) is essential in Life's strategy of evolution, so that change can eventually happen and yield stability (rather than pure chaos such as cancer). Given that we don't have a short descriptive name to use in place of "Nonviolent Communication", I prefer sometimes to use "Nonviolent Communication" if I think people will have some familiarity with the term. Or I'll choose a more topic-specific title, and mention the connection to NVC in the description. Another idea that just occurred to me is to call it "The Rosenberg Method", or "Rosenberg Consciousness". > I don't like common words in the English language being trademarked and then off-limits to others' use, so although I'm happy to avoid one term if so requested, I don't think it meets my need for ease in communication for the requested list of words to avoid to grow. Despite your first statement above, I really do believe that you're hearing demand rather than request. If you were hearing a request, your ease couldn't possibly be affected. If you heard a request, you wouldn't be thinking about trademarks (protecting and enforcing someone's "rights" against your wrongs) and "off-limits" language. If you heard a request, you'd simply look inside, notice the niggle, compare it with the joy of a small child feeding a hungry duck, and then decline the request. 
You'd have sense of complete freedom & lightness in doing so, knowing that the Universe contains infinitely many other strategies because the requested one. At your leisure, you might be delighted to meditate over other strategies for yourself as well. And you'd change your own strategy only when you found one you actually prefer. Joyful, self-ful choice, without any submission or rebellion. === * Engaging in some dialogs around CNVC's "request" not to use the name "Nonviolent Communication" (or "NVC"). See [[Using the name "Nonviolent Communication"| http://evolve.awakeningcompassion.com/?p=20]] and [[On "respecting" requests]]. * Eros rewrite ++++ * I switched to rendering outputs as //sources// of consumers. Doing so obviated the reference I was using to hold onto function values. Given @srca :: Source a@ and @oibsrc :: Source (OI b)@, we define @oifsrc :: Source (OI (a->b))@ simply as @oifsrc = liftA2 lam srca oibsrc@, where @lam a bo = \ ab -> bo (ab a)@. * Next puzzle: how to finish rewriting @wGetPut@ (for rendering an @OLambda@) in the simple form as @wGetGet@ and @wPutPut@. The challenge is how not to render the input and output //independently// and combine the result. I have a hunch it can be done if I have @WPut@ mimic @WGet@ in using the //partial// values monoid (represented as endomorphims, i.e., @a -> a@). Embed a @Source (Partial a)@ and a @Source (Partial (b -> o))@, //each// into a @Source (Partial ((a -> b) -> o))@ and @mappend@ the two results. If it works, it probably works without the @Source@ wrappers, and then use @fmap@ on each conversion, so try without @Source@ first. I tried Lennart Augustsson's [[Djinn| http://permalink.gmane.org/gmane.comp.lang.haskell.general/12747]] "coding wizard", which derives inhabitants for types. It derived @unFst@ and @unSnd@ (for @wGetGet@) but not the two embeddings I'm looking for. Playing some more, I come up with the the first one. 
++++ \begin{code} -- | Provide in info about a function argument unArg :: Partial u -> Partial (u -> v) unArg = inPartial (flip (.)) -- \ uv -> \ u -> uv (uu u) -- | Provide info about a function result unRes :: Partial v -> Partial (u -> v) unRes = inPartial (.) unSrc :: Partial a -> Partial ((a -> b) -> o) unSrc = unArg . unArg unSnk :: Partial (b -> o) -> Partial ((a -> b) -> o) unSnk = ??? \end{code} === I'm not yet seeing how to define @unSnk@. * === * On {{{#haskell}}}, nominolo pointed me to [[Lisp Machine's "presentations"| http://www.lispworks.com/documentation/lw43/CLIM/html/climguide-92.htm#pgfId-383438]], which seems related to Eros. Type the text for 'New Tiddler' * More discussion about of the CNVC's request not to use "Nonviolent Communication". In response to Laurel. +++> > The request I am seeing now is to not use Nonviolent Communication, NVC, etc. because those are trademarked in some way. This is part of what I don't like about the "request" (or demand, as to be determined). The "because" part doesn't make sense to me in a way that's at all consistent with NVC Consciousness or Method. I don't see how could someone's trademark could be a life-serving "reason" for me to make a choice not to use it, other than protecting myself against their lawsuit. It's easy for me to interpret the "because ... trademark" as a threat or a "right", both of which I see as domination rather than partnership. I'd really love to connect with genuine Needs behind this request and related ones. The ones I've heard, I just don't buy. They sound sort of like needs to me, but just on the surface. > I have no objection to do this as long as I can use the phrase compassionate communication when advertising and nonviolent communication/NVC in the body of materials & advertising. Picking up on your phrasing, "as long as I *can* ..." (emphasis mine), I'm guessing you are hearing a demand (as I am) rather than a request. 
In other words, I'm guessing that you expect something less pleasant than genuine empathic connection with your needs, if you don't comply. > I do have some general comments about the request CNVC is making. The request is made on their web site and they do not provide a contact person for further discussion if a person's initial response is "no." I think making dialogue happen is a two-way street and I would appreciate them providing us with a contact person. In addition, a person who responds no can also contact them for further rounds of negotiation. Exactly! That's a big missing piece for me in this whole "request" thing. If I make a request and don't demonstrate any interest in hearing your response, how do we giraffe-dance? I'd have short-circuited the beautiful process that starts with "no" (shorthand for "I have needs also") and ends in connection and all-needs-met. > If this is truly a request, then answering either yes or no would be OK to CNVC. If they don't find it OK, then it's not really a request from their perspective. Exactly. > Laurel I'm encouraged & grateful for your words, Laurel. So often, I imagine I'm alone in how I see things. I get especially lonely & hopeless when I believe NVC folks (including most cnvc-certified people I've heard) are picking up some new words but missing the essence of NVC Consciousness (some specifics at http://evolve.awakeningcompassion.com). I'm craving to see a deep shift and to have community in bringing about such a shift. Hugs, - Conal === * And in reply to Erika +++> > Hi Conal, > It's good to hear your "voice." Let me try again. It feels satisfying to imagine I am contributing to CNVC by respecting their request and honoring their preferential strategy - that's my need I was talking about (contribution, imagining this strategy meets needs of ease, reassurance and comfort for them). 
If I understand you, you're saying that you imagine that cnvc's request is somehow life-serving, i.e., it effectively addresses genuine needs. Right? If so, that helps me understand our different responses, as I interpret their request quite differently. Given this difference, saying yes is a life-serving choice for you, and saying no is a life-serving choice for me. I'd love to co-explore the question of needs met by the cnvc strategy. I've never been able to believe the ones I've heard offered. In part, I think my understanding of needs differs from most people's. For example, I don't see ease, reassurance, or comfort as needs. If we pursue this question, I bet we'd find much yummier alternatives. I don't know if you or other people care much about the question of "which needs", while to me, it's *the* important question. If we miss the heart of the needs, then the essence will not be addressed by our strategies. That's exactly what I believe is happening with this and some other cnvc strategies. > I guess I'm not having much conflict here but I sense you are so I'd be curious as to your response to Joe's empathy guesses for you. Yes, I am having conflict with this strategy (and others). I understand it as not serving my needs, the network's, or humanity's. > To put it in perspective, before teaching NVC I taught childbirth education through an organization that [...] Wow -- I can see how cnvc might be a joy to work with in contrast to your other experience. > I have to admit, I feel some irritation about reading things like "And I suspect that "request" and "needs" language is being used for something that is neither" because i value directness, openness and honesty and wonder if you would be willing to contact someone at CNVC and tell them (if it's true) that you are unwilling to honor their request and wish to discuss a way to meet both their needs and yours (for autonomy?). 
Making that request of you meets my needs for integrity, to support reconciliation, inclusion and mutual resolution whether or not you agree to it, though I am curious if you'd be willing to do this or not and what needs are met by your choice. I have spoken with a few folks in cnvc, and I intend to do more, though I really don't know who's interested. I have a nice connection started with John Wiley (presiding over the cnvc board), and I'll see if he knows who'd want to hear my "no" and take up a dance from there. > I feel relief just being brave enough to admit that's important to me. The idea of someone deciding on their own not to follow the request without letting the requester know bugs me. It bugs me less to think of someone telling CNVC they won't follow it, having CNVC refuse to revise or negotiate their request (in which case the assumption of demand would be verified, not merely assumed) and then disregarding the request. That last scenario feels better to me as well. I still suspect their's some Power-Over stuff going on here. I'd appreciate you joining me in a little thought experiment. Suppose I place a request on http://awakeningcompassion.com that says I'd like CNVC to stop using the name "Nonviolent Communication" and call it "Rosenberg Consciousness" (because I genuinely prefer the latter), and another to stop and renounce certification (because I genuinely see it as deeply out of alignment with Rosenberg Consciousness), and another to drop their legal ownershop of "NVC". I'd include brief descriptions of needs met and then mention the page on a few mailing lists. Suppose they don't make the changes I request and go about doing as they see best, instead. Would that bug you in the same way? If not, why not? > I'll tell you a funny story. When my husband got out of the army, he moved to Seattle and called up his National Guard contact (which he was "supposed" to do). 
Rob informed this man that he would not be showing up for any monthly meetings as mandated. The guy was flabbergasted. He said many people get out of the army and don't follow through with their monthly commitment thereafter, but no one calls to tell them they won't be doing it! ;-) I love that story. Go Rob!, and thanks for the inspiration. > I like "the Rosenberg Method for Consciousness" ;-) Hm. I prefer two separate names, "The Rosenberg Method" (maybe "The *Evolving* Rosenberg Method", to remind people that it's always evolving) and "Rosenberg Consciousness". It's very important to me that people keep these two things clearly separate in their mind, so they'll distinguish between what is essential (the Consciousness) and what merely a (very helpful) strategy and therefore disposable. I don't see many NVC folks make this distinction. Ironically, to cling to the NVC "method" or "process" is to contradict the heart of NVC itself, namely focus on needs and release of all attachment to strategy. > I enjoyed what you said about language and cultural thought evolving together. It makes me think of readings I did long ago on the Whorfian hypothesis - some cultures do not have certain words in their language (ex. the Italians and "la privacy") - nor do they really have the concept. I think this is what Marshall is trying to get across with his descriptions of the Orang-Asli. It's also nice to think of the slow change in our culture as meeting some need (stability), as I'm frequently discouraged by the slowness of the transition to a more peaceful, inclusive society. Thanks for that connect. > Okay, feeling annoyed now: "I really do believe that you're hearing demand rather than request. If you were hearing a request, your ease couldn't possibly be affected." I like to have sovereignty over what I feel or am aware of inside and I'll ask you not to tell me what I feel or think. Double-ouch! -- one for you and one for me in reading your response. 
It's true that I didn't believe what you said. That's about me. I got contradictory info from your note, and I let you know what I believe and why. Of course, I can't know for sure what's true in you. And yet I leaned very strongly in one direction. > I'm not currently aware of hearing not using "nonviolent communication" as a demand. Are you ?!? I'd be more interested to hear about that. Maybe at some point I will have some awareness of that myself but I don't now. I do have a niggle over the idea of not using the terms compassion or empathy or compassionate communication - but according to my reading of their request, avoiding those phrases has not been asked of us. To me "nonviolent" isn't really an English word, rather a translation, so I'm much more comfortable avoiding it than a word that is actually part of my everyday language, like compassion. That's what I was referring to when I said I didn't want common language trademarked. So I'm still confused about what to believe. And some of my confusion is connected to my understanding that you don't hear a demand when you're happy to comply anyway (avoiding "nonviolent communication"), but you "have a niggle of irritation" when you imagine them asking you to something you don't want to comply with (avoiding "compassionate communication"). And my understanding of Marshall's notion of "request" vs "demand" has nothing to do with whether I want to do it. What might help me understand is this: if you decide that you want to use "nonviolent communication" in your talk title (e.g., for recognition among folks you expect to know that term), would you feel free to use the term, perhaps informing (not asking) cnvc. And the same question if CNVC added a request that you not use "compassionate communication", "observation, feelings, needs, requests", or "empathic listening". 
> But that is sheer projected fantasy - CNVC has not actually asked that and my feelings are from other organizations that have made demands of intellectual property out of language. I do have angry feelings over the power (over) that some institutions have given out - without consulting me (for instance, trademarking words or patenting life forms). I think that's where the tension you heard comes from. I'm feeling scared and powerless about that, but it's not happening here, in my estimation. Gotcha, I think. The "niggle of irritation" comes from the fantasy. I'm still confused though, as I understand your irritation (which I interpret as a demand-reaction) tied to whether or not the request interferes with your needs. > I'm actually, in hindsight, delighted that they asked us to avoid "nonviolent communication" because it has gotten me in touch with why that phrase doesn't really work for me and the richness of alternatives available...I've been more productive at brainstorming than if I had just, for ease and thoughtlessly gone on using "nonviolent communication." Hooray! > You said, "If you heard a request, you'd simply look inside, notice the niggle, compare it with the joy of a small child feeding a hungry duck, and then decline the request." I think that would only be true if I had only one need at a time. But I don't. I have the need for ease and autonomy, but I also have the need to live in community, contribute to others' wellbeing and work out agreements for behavior that work for all parties. If the neighbor's house was burning down, I wouldn't say it meets my need to rest. It has perhaps caused a slight unmet need for ease in me but the pleasure in respecting their request outdoes that for me. I don't think I would feel pleasure in respecting their request if I didn't feel in choice about it. I'd sure like to know you heard this. Would you be willing to tell me briefly what you think I'm saying? Thanks! 
I like this part of your note the best (well -- this and the Rob story), as it gives me some of the community engagement I'm looking for in consciously examining what we're thinking and doing, related to NVC. I think you're saying that's what's a joyful (or at least "choiceful") decision for you sometimes (often, I suspect!) involved more than one need. And so you distrust and disagree with a single-need perspective. And in any case, it's important to you to know that you've taken the space to make a conscious, centered choice. How's that? Going out on a limb a bit, my hunch is that (like me), it takes you longer to get to that place of knowing you're in choice if you hear a demand, especially when the request doesn't match your preference. I suspect that's the reason that (if I've understood), you link the question of request-vs-demand with the question of whether you want to do it anyway (triggering my confusion, as mentioned above). My hunch is that when in this conversation you've said you hear a request or hear a demand, what you mean is that you're in a comfortable space of choice within your self or you're not. I hope you don't take my previous sentence as criticism or correction. I actually like that meaning for "hear a demand", as it gives me my power back. If I don't want to hear demands, I only have to work on myself, not others. Whether I hear a demand is thus cleanly separated from whether someone else is "making a demand" in Marshall's sense, namely attachment to their strategy. And since that attachment is (or isn't) going on inside of them, it can't affect me. > Gee, I miss your full body presence in Seattle and our practice groups! I miss you too! This sort of discussion would be more fun in person, especially over margaritas or something. 
> Warmly, Erika Likewise, - Conal === * Wrote blog posts [[A type for partial values | http://conal-elliott.blogspot.com/2007/07/type-for-partial-values.html]] and [[Implementing a type for partial values| http://conal-elliott.blogspot.com/2007/07/implementing-type-for-partial-values.html#links]] :: 2007-07-03 [2007-07 day] * Playing with variants on the @lam@ function. Got some help from [[Djinn| http://lambda-the-ultimate.org/node/1178]] ++++ \begin{code} data O type Sink a = a -> O type SSink a = Sink (Sink a) ssink :: a -> SSink a ssink = flip ($)   == \ a ao -> ao a

lam1 :: a -> Sink b -> Sink (a->b)
lam1 a bo = \ ab -> bo (ab a)

lam2 :: SSink a -> Sink b -> Sink (a->b)
lam2 aoo bo = \ ab -> aoo (bo.ab)
-- \ ab -> aoo (\ a -> bo (ab a))

{-
lam2 (ssink a) bo == \ ab -> ssink a (\ a -> bo (ab a))
== \ ab -> bo (ab a)
== lam a bo
-}

lam3 :: a -> SSink (Sink b) -> Sink (a -> b)
lam3 a booo = \ ab -> booo (\ bo -> bo (ab a))
-- \ ab -> wb (ab a) booo

{-
lam3 a (ssink bo) == \ ab -> ssink bo (\ bo -> bo (ab a))
== \ ab -> bo (ab a)
== lam a bo
-}

lam4 :: SSink a -> SSink (Sink b) -> Sink (a -> b)
lam4 aoo booo = \ ab -> booo (\ bo -> aoo (bo.ab))

lam5 :: SSink (SSink a) -> Sink b -> Sink (a->b)
lam5 aoooo bo = \ ab -> aoooo (\ aoo -> aoo (bo.ab))

lam6 :: SSink (SSink a) -> SSink (Sink b) -> Sink (a->b)
lam6 aoooo booo =
\ ab -> booo (\ bo -> aoooo (\ aoo -> aoo (bo.ab)))
-- but all of the following type-check as well
-- \ ab -> aoooo (\ aoo -> booo (\ bo -> aoo (\ a -> bo (ab a))))
-- \ ab -> aoooo (\ aoo -> aoo (\ a -> booo (\ bo -> bo (ab a))))
-- \ ab -> booo (\ bo -> aoooo (\ aoo -> booo (\ bo -> aoo (\ a -> bo (ab a)))))
\end{code}
===
* Still messing with my Eros reformulation. ++++
*  I don't know how to apply the "partial values" trick to "lambda" or pair output.  I think the reason is that although all pair-typed values really consist of first and second halves (pair surjectivity), a similar property does not hold for functions //from// pairs (and similarly for functions from functions).  Given @abo :: (a,b) -> o@, I don't know that @abo@ is some standard function of @ao :: a -> o@ and @bo :: b -> o@.  In fact, I know this property fails, since @abo@ could sum the pair components and convert the result.
*  Dropping partial values for outputs, I used a simple trick for pairs that I don't know how to extend.  I can map each of @OI a@ and @OI b@ to @OI (a,b)@, just by composing with either @fst@ or @snd@, and then sequence the two @OI (a,b)@.  The key is that one can extract @a@ and @b@ from the @(a,b)@.  To play this game with functions, we'd have to extract a @b@ from an @a->b@, or in some other way, turn an @OI b@ into a @OI (a->b)@.  For @a@, it's even trickier, since there's no @OI@.  That's where I use the variant @lam2@ from [[2007-07-04]].  Then I'd need to map @OI (OI a)@ to @OI (a->b)@, perhaps by extracting @OI a@ from @a -> b@.  I think I just need a different approach.
===
* More stimulating correspondence with Jacob Gotwals.
* A reviewer for my Eros paper wrote ++++>
p3: You call values of type @Output a@ "visualisers", but don't name values of type @Input a@. Why not use the terms "viewers" and "controllers", making a connection with the (related) Model View Controller pattern?
===
I like that suggestion very much.  Perhaps it's an idea for another paper, "Functional, Composable Model-View-Controller".  Add "model" for the (immutable) value being visualized.
* Updated web version of TypeCompose to 0.1.  //To do:// update dependent libs.  Which???
* Made Germany flight reservations on AA.  Still to do: reserve accommodations, arrange flight to
Seattle, register for HW, ICFP, and maybe IFL.
* For a home proxy, check out Squid or Apache.
* On road trip with Becky.
* In Chicago. Passenger side mirror on my Accord broke off.   Looking for Honda dealer to replace.  They can all order the part, but it would arrive after we leave for Atlanta.  I got some help locating a part at Curry Honda in Atlanta.  The locator process said it's the only one of that part in stock in a four-state region from here to Atlanta.  $254+labor. Then I tried car parts stores and found one that could get the part in two hours for only $60, and I could install it myself or get help.  They got the part, and Becky & I installed it.  I didn't know how to get the door panel back on, so I got help at a garage.  Everyone in the process was terrifically helpful.  The experience turned out to be very sweet for me.
* I keep running into dead-ends in typing my new, "simplified" Eros implementation.  Haskell's (even GHC's) type checking is pinching.  I think some extensions would make it work much better for me.  Collectively, I think of the extensions as using something like Lambda-Prolog for type checking.  Specifically, ++++
*  Allow conflicting--even equivalent--heads in type class instances.  After matching multiple heads, use backtracking search to find satisfiable clause preconditions.  Example ++++
\begin{code}
instance (Functor g, Functor f) => Functor (O g f) where
fmap h (O gf) = O (fmap (fmap h) gf)

instance (Cofunctor g, Cofunctor f) => Functor (O g f) where
fmap h (O gf) = O (cofmap (cofmap h) gf)

instance (Functor g, Cofunctor f) => Cofunctor (O g f) where
cofmap h (O gf) = O (fmap (cofmap h) gf)

instance (Cofunctor g, Functor f) => Cofunctor (O g f) where
cofmap h (O gf) = O (cofmap (fmap h) gf)
\end{code}
===
*  Universally quantified goals.  Example ++++
\begin{code}
pairMon :: ((forall a. Monoid (m a)), InPair (O m f)) => PairTy (O m f)
\end{code}
*  What about higher-order matching or unification?  For instance, eliminate explicit @newtype@ constructors @Id@ and @O@.
*  What about multiple solutions (not just multiple matches, but multiple solutions remaining after satisfying preconditions)?
*  Add a syntax for explicit type abstraction, such as @/\ t. t -> t@.
===
===

* Back from the cross-country Becky-and-Dad trip.  About 4000 miles for me, including the drive from home to Seattle.  Fun time.  Glad to be home.
* Still puzzling over how to drastically simplify my Eros implementation.   ++++
*  Question: can passing of the explicit type representations be somehow avoided?  For instance, could I use something like the @Typeable@ class?  I'd probably need existential types, which I'm now simulating.  And I'm not sure it would help.
*  Even when I get this simplification figured out, I don't have polymorphism.  Could a solution to polymorphism lead to a more elegant solution for monomorphism?
===
* Maybe I can make the Eros implementation simpler by making the problem harder.  Specifically, take on parametric polymorphism, which is a critical missing piece.  I have a sticky point now (only in my attempts at ultra-simplifying the implementation) of lifting arrows to arrows on pairs and functions.  I know how to do it, but I have to inject the missing type information, which complicates the interface a bit.  When I address polymorphism, I won't have to //be given// the missing type information, because that information will be //universal//!  Consider @firstT@ as example. ++++
\begin{code}
type Convert f a b = f a -> f b         -- ^ A conversion type

data Typed  f = forall a  . Typed  (Ty a) (f a)

type TArr (~>) a = Typed ((~>) a)

type ConvertT f a c = Ty c -> Convert f a c

firstT  :: Arrow (~>) => ConvertT (TArr (~>)) a (a,b)
firstT  tyab (Typed tya' a_a') = Typed (pairTy tya' (sndTy tyab)) (first  a_a')
\end{code}
===  Similarly for @secondT@ and @resultT@.  Note that in @tyab@, the first part is ignored, and the second part is universal.

* Blog post "[[Abundance and Scarcity in the Consciousness and Practice of NVC| http://evolve.awakeningcompassion.com/?p=22]]"
* Oops!  I hadn't noticed that the second Haskell Hackathon will be held in Freiburg, immediately following ICFP.  I'd like to attend, so I'm going to change my plane tickets.
* Found a blog "[[living in reflective practice| http://livinginreflectivepractice.blogspot.com]]", which comments on one of my "NVC Evolves" posts.  Looks like a lot of content I'll like.
* Note to haskell list: "Type class instance selection & search" +++>
I keep running into situations in which I want more powerful search in selecting type class instances.  One example I raised in June, in which all of the following instances are useful.
\begin{code}
instance (Functor g, Functor f) => Functor (O g f) where
fmap h (O gf) = O (fmap (fmap h) gf)

instance (Cofunctor g, Cofunctor f) => Functor (O g f) where
fmap h (O gf) = O (cofmap (cofmap h) gf)

instance (Functor g, Cofunctor f) => Cofunctor (O g f) where
cofmap h (O gf) = O (fmap (cofmap h) gf)

instance (Cofunctor g, Functor f) => Cofunctor (O g f) where
cofmap h (O gf) = O (cofmap (fmap h) gf)
\end{code}

My understanding is that this sort of instance collection doesn't work together because instance selection is based only on the matching the head of an instance declaration (part after the "=>").  I'm wondering why not use the preconditions as well, via a Prolog-like, backward-chaining search for much more flexible instance selection?  Going further, has anyone investigated using Prolog as a model for instance selection?  Better yet, how about [[LambdaProlog| http://www.lix.polytechnique.fr/Labo/Dale.Miller/lProlog]], which generalizes from Horn clauses to (higher-order) hereditary Harrop formulas, including (restricted but powerful) universals, implication, and existentials?  Once search is in there, ambiguity can arise, but perhaps the compiler could signal an error in that case (i.e., if the ambiguity is not eliminated by further search pruning).

My motivation: I've been playing with a programming style in which my type formulation leads to automatic construction of much of the code, thanks to use of Functor, Applicative, Monoid, and type composition.  An example is [[Applicative_data-driven_programming| http://haskell.org/haskellwiki/Applicative_data-driven_programming]], and I'm trying now to do the same to create a much simpler implementation of [[Eros| http://conal.net/papers/Eros]].  I think this programming style is what Conor was alluding to recently as "[[types don't just contain data, types explain data| http://article.gmane.org/gmane.comp.lang.haskell.cafe/26520]]".  (Conor: I hope you chime in.)  My hunch is that this programming style tends to run up against the head-only instance matching mechanism and would work much better with a more powerful means of selecting instances.
===
* And another +++
Some other instances that could work with backward chaining
\begin{code}
instance Monad m => Applicative m where
pure  = return
(<*>) = ap

instance (Applicative f, Monoid a) => Monoid (f a) where
mempty  = pure mempty
mappend = liftA2 mappend

instance (Applicative f, Num a) => Num (f a) where
(+)         = liftA2 (+)
fromInteger = pure . fromInteger
-- etc
\end{code}
Currently, I place such instance declarations in comments as boilerplate to be instantiated manually.
===
* ''Visualizing polymorphic values'' ++++
I'm pondering how to extend tangible values (see [[TV| http://haskell.org/haskellwiki/TV]] and [[Eros| http://conal.net/papers/Eros]]) to concretely visualize //polymorphic// values, such as the identity function.  Tangible values render as compositions of GUI elements: atomic widgets (sliders, text boxes, graphics canvases), as well as horizontal juxtaposition for pairs and vertical juxtaposition for functions.  I'm looking for a simple, general, and easy-to-guess interface design.

How can one //concretely// visualize a polymorphic value, such as @id :: forall a. a -> a@?  How to present input and output of an arbitrary type (@a@)?  When a value is given for the input and shown in the output, that value will be monomorphic.

Note that an input or output might not be fully polymorphic, e.g.,
\begin{code}
rotate :: forall a. Double -> Image a -> Image a
\end{code}

One idea is for polymorphic inputs & outputs to present the user with a menu of visualizations.  Choosing a visualization implies a (temporary) selection for one or more type variables, which then propagate to other instances of the type variable.  The GUI would dynamically change accordingly.

A variation on this idea is to use explicit type abstraction and require that the user instantiate type variables concretely before choosing visualizations for polymorphic inputs and outputs.  Then the interfaces offered would be much more specific.
===
* How to //input// functions. ++++
A function output consists of an argument input and a result output.
\begin{code}
OLambda :: Input (~>) a -> Output (~>) b -> Output (~>) (a->b)
\end{code}
Currently I have no generic support for function //inputs//.  Idea: simply use the dual of the output representation, i.e., an argument //output// and a result //input//.
\begin{code}
ILambda :: Output (~>) a -> Input (~>) b -> Input (~>) (a->b)
\end{code}
The idea is that if the system wants a function from the user, it sets up a way to sample the function by presenting argument values to the user and getting result values from the user.

One catch in this idea is that the user may provide different results for the same queried argument.  In other words, a more accurate type for @ILambda@ would yield @Input (IO (a->b))@.  I could address this possibility by some sort of value-based memoization, with a requirement like @Ord a@.

Hmm: inputs are supposed to be time-varying, so there'd have to be a way for the user to clear out the currently accumulated function data and start a new one.
===
* Unifying inputs & outputs ++++
Consider replacing the two GADTs @Input@ and @Output@ with a single GADT, say "@Interface@", that understands how to both get //and// present values of a given type.

Note: I can now define a @Functor@ instance for @Input@ and a @Cofunctor@ instance for @Output@.  A combined representation would probably require some kind of hybrid of @Functor@ and @Cofunctor@, taking a pair of mappings (which should be inverses).  (Consider possible connection with the paper "[[There and Back Again: Arrows for Invertible Programming| http://citeseer.ist.psu.edu/alimarine05there.html]]".)
===
* Signed up with [[citeulike| http://www.citeulike.org]] as user [[conal| http://www.citeulike.org/user/conal]].  There's now an easy "bookmarking" link on [[CiteSeer| http://citeseer.ist.psu.edu/alimarine05there.html]] pages.  Added my Eros paper bib.
Type the text for 'New Tiddler'
* Changing my domain registrar for conal.net from web.com to 1and1.com (where I have several other domains).
* Haskell list note, "Quantified class constraints" +++>
I'm developing a type constructor class and want the constraint @forall a. Monoid (m a)@ (for @m :: * -> *@), which is neither legal Haskell, nor supported by GHC.

As a work-around, I used the first encoding suggested in [[Simulating Quantified Class Constraints| http://www.citeulike.org/user/conal/article/1529146]] (Valery Trifonov, Haskell Workshop '03).  Add a type class
\begin{code}
class Monoid_f m where
mempty_f  :: forall a. m a
mappend_f :: forall a. m a -> m a -> m a
\end{code}
and an instance *schema*
\begin{code}
-- instance Monoid_f f where { mempty_f = mempty ; mappend_f = mappend }
\end{code}
to instantiate manually wherever necessary. For instance,
\begin{code}
instance Monoid_f f where { mempty_f = mempty ; mappend_f = mappend }
\end{code}
For instance,
\begin{code}
instance Monoid_f [] where { mempty_f = mempty ; mappend_f = mappend }
\end{code}

The paper's second approach is to replace the schema and multiple instantiations with a single instance.
\begin{code}
instance Monoid_f f => Monoid (f a) where
{ mempty = mempty_f ; mappend = mappend_f }
\end{code}
As the paper points out,
> Unfortunately, due to the type variable f in the head of the instance type, this declaration is not in Haskell 98; however, at least two implementations support extensions allowing such declarations.

Sadly, this solution runs into the problem of instance selection based only on head-matching, not back-chaining into constraints.  For instance, I'd like also to use the following "conflicting" declaration.
\begin{code}
instance (Applicative f, Monoid a) => Monoid (f a) where
mempty  = pure mempty
mappend = liftA2 mappend
\end{code}

What's the state of thinking & doing with regard to universally quantified class constraints?

Note that hereditary Harrop formulas do include universally quantified goals.  Less ambitiously, I think GHC's type-checker already deals with universally-quantified variables, so perhaps quantified constraints are not a great reach (just guessing).
===
* Started reading [[Type Classes With More Higher-Order Polymorphism| http://www.informatik.uni-freiburg.de/~neubauer/papers/icfp02.pdf]] (Matthias Neubauer and Peter Thiemann, ICFP '02), which discusses type lambdas.
* Stefan O'Rear pointed out a simple, genuine ambiguity in my example from yesterday ++++>
No quotes - [] is both Applicative and Monoid.  Should [String] ["ab","cd"] mappend ["ef","gh"] give ["ab","cd","ef","gh"] or ["abef","abgh","cdef","cdgh"]?
=== Thus there's no way to expect the compiler to pick one meaning.  Perhaps the whole enterprise of automatic code weaving from instances is bound to run into ambiguity problems.
* Updated my [[publications page| http://conal.net/papers]] to make the entries more uniform and provide only PDFs wherever possible.  Updated my CV correspondingly.
* Back to my Eros re-implementation.
* Make sure that [[TV| http://haskell.org/haskellwiki/TV]] is flexible enough that it can synthesize simple GUIs, but also, //interactively composable// versions (Eros).
* My new Eros implementation has both "sources" and "events"?  While @Source@ is an @Applicative@, @Event@ is just @Functor@.  Show both in [[Applicative data-driven programming| http://haskell.org/haskellwiki/Applicative_data-driven_programming]].
* Idea: Have @funFirstT@ (etc) transform type-tagged arrow-generating events into same.  It cannot know what monomorphic type to use for the second half of the pair, so //use partial types//.  After converting two such tagged events, combine the results, @mappend@ing the partial types to combine type info and @mappend@ing the events.  Interesting alternative to polymorphism.  Hm.  Can I somehow pull polymorphism out of a partial type?  Look for a lovely algebraic wrapper.  It may be very simple. +++
\begin{code}
type Convert f a b = f a -> f b         -- ^ A conversion type

newtype (f :*:  g) a = Prod (f a ,  g a)
newtype (f :->: g) a = Func (f a -> g a)

newtype Typed f = forall b. Typed (Ty :*: f) b

type ConvertT f = Convert (Typed f)

type PTyped f = (Partial O Ty) :*: f

type ConvertPT f = Convert (PTyped f)

first :: Arrow (~>) => (a ~> a') -> ((a,b) ~> (a',b))

-- Arrow tagged with result type
type TArr (~>) a = Typed ((~>) a)

firstPT  :: Arrow (~>) => ConvertPT (TArr (~>)) a (a,b)
firstPT  (Typed (Prod ptya' a_a')) =
Typed (Prod (unFstTy ptya') (first  a_a'))

-- TODO: abstract 'first' from 'firstPT'

funFirst   :: DeepArrow (~>) => (a ~> (d->a')) -> ((a,b) ~> (d->(a',b)))

-- Arrow tagged with function result type
newtype TypedFun f = forall d a'. Typed2 (Ty :*: f) (d -> a')

-- | Function extractor tagged with result function type
type TFExt (~>) a = TypedFun ((~>) a)

funFirstPT  :: DeepArrow (~>) => ConvertT (TFun (~>)) a (a,b)
funFirstPT  (TypedFun (Prod ptya' a_d_a')) =
TypedFun (Prod (pResultTy #### tyd (pairTy tya' (sndTy tyab)) (funFirst a_d_a')
\end{code}
===
* Oh!  Playing with this idea led me to type functions as arrows and as //deep// arrows.  Works out beautifully. +++
\begin{code}
type TyX a b = Ty a -> Ty b

newtype TyFun a b = TyFun { unTyFun :: TyX a b }

inTyFun :: (TyX a b -> TyX a' b') -> (TyFun a b -> TyFun a' b')
inTyFun = (TyFun .) . (. unTyFun)

instance Arrow TyFun where
first  = inTyFun (inPairTy . first )
second = inTyFun (inPairTy . second)
TyFun f >>> TyFun g = TyFun (f >>> g)

instance DeepArrow TyFun where
result = inTyFun (inFunTy . second)
idA    = TyFun idA
dupA   = TyFun (pairTy' . dupA)
fstA   = TyFun fstTy
sndA   = TyFun sndTy
funF   = TyFun $dsPairTy >>> first dsFunTy >>> \ ((c,a),b) -> c #-> (a #* b) ... \end{code} === TODO: explore making @Ty@ a GADT, and make these defs prettier. ++++ \begin{code} rAssocA = TyFun$ \ ((a :* b) :* c) -> (a :* (b :* c))
\end{code}
=== On the other hand, I might have to deal with pattern matching failures.
* I indulged in a metalinguistic chat session on #haskell IRC about (a) whether Haskell has verbs and (b) distinguishing between syntax vs semantics.
* Transferred conal.net from web.com to 1and1.com.
* Playing with QuickCheck, starting with @Data.Partial@.  How to generate arbitrary partial values of a given type?  Simply by generating arbitrary endomorphisms.  The cool thing about this approach is that I automatically get random generation of filler for the undefined bits.  Code: +++
\begin{code}
instance Arbitrary a => Arbitrary (Endo a) where
arbitrary   = fmap Endo arbitrary
coarbitrary = coarbitrary . appEndo
\end{code}
where
\begin{code}
class Arbitrary a where
arbitrary   :: Gen a
coarbitrary :: a -> Gen b -> Gen b
\end{code}
===
* Imagine using partial //visualizations// of typed values.  They could be assembled from parts, and then have the missing parts filled in automatically from a default.
* To run Eros (avoiding problem with hs-plugins on WinXP & ghc-6.6) ++++
*  Create an empty directory {{{O/}}}
*  Recompile modules to make {{{.o}}} files.  Don't bother with main.
*  In shell, hide {{{.o}}} files: {{{mv *.o O/}}}.
*  Start ghci and load app, e.g., {{{:load Main}}}.  Because I hid my {{{.o}}} files, my code gets loaded interpreted.
*  In shell, unhide {{{.o}}} files: {{{mv O/*.o .}}}
*  In ghci, run the app, e.g., {{{main}}}.  Now {{{hs-plugins}}} finds compiled code when it needs to.
===
* I got my polymorphism-via-partial-types scheme written & compiling.  It got very simple when I let go of making my definitions at the level of type-to-type functions.  I haven't put it all together, and now I'm having doubts.  The embedded transformations etc have lots of undefined places.  How will they get filled in?  For instance, ++++
\begin{code}
emb1 :: (c~>c') -> ( (a->(f,b->(c,g)),e) ~> (a->(f,b->(c',g)),e) )
emb1 = first . result . second . result . first
\end{code}
=== Given a transformation @xf :: c~>c'@, the embedding will yield @emb1 xf@, together with a partial type @(_->(_,_->(c',_)),_)@.  I could then apply the underlying endomorphism and apply it to the type of the whole value being transformed, i.e., @(a->(f,b->(c,g)),e)@.   Is that choice correct in general?  I doubt it.  Consider embedding a //polymorphic// arrow, such as @swapA :: forall a b. (a,b) ~> (b,a)@.
* Alternative: Represent a polymorphic arrow as a //function// from domain types to range types.  Here's some magic: I discovered recently that my (typed) type of type representations is a deep arrow.  Thus I can simultaneously transform values with their types.  I lose context-based type inference and polymorphic constants.  I already use this sort of type-function in Eros, so I'll just be cleaning them up and using them more centrally.  Hm.  How to formulate my tagged arrows?  With my partial-type idea: ++++
\begin{code}
data TArr (~>) a = forall a'. TArr (ParTy a') (a ~> a')
\end{code}
=== In the type-function idea, I'd want to have @arr :: a ~> a'@ and a @tyf :: Ty a -> Ty a'@. ++++
\begin{code}
data TArr (~>) a = forall a'. TArr (a TyFun a') (a ~> a')
\end{code}
=== where @TyFun@ is the deep arrow of type transformations.  Then, for instance ++++
\begin{code}
firstT  :: Arrow     (~>) => TArrX (~>) a (a,b)
firstT  (TArr ta_a' a_a') = TArr (first  ta_a') (first  a_a')
\end{code}
=== This approach works out brilliantly!
* I switched the typed type representation to a GADT, for convenient pattern matching.  Very nice!  I love the simplicity and clarity of the new @Arrow@ and @DeepArrow@ instances. +++
\begin{code}
instance Arrow TyFun where
TyFun f >>> TyFun g = TyFun (f >>> g)
first  (TyFun f) = TyFun $\ (a :* b) -> (f a :* b) second (TyFun g) = TyFun$ \ (a :* b) -> (a :* g b)

instance DeepArrow TyFun where
result (TyFun g) = TyFun $\ (a :-> b) -> (a :-> g b) idA = TyFun id dupA = TyFun$ \ a -> a :* a
fstA     = TyFun $\ (a :* _) -> a sndA = TyFun$ \ (_ :* b) -> b
funF     = TyFun $\ ((c :-> a) :* b) -> (c :-> (a :* b)) funS = TyFun$ \ (a :* (c :-> b)) -> (c :-> (a :* b))
funR     = TyFun $\ (a :-> c :-> b) -> (c :-> a :-> b) curryA = TyFun$ \ ((a :* b) :-> c) -> (a :-> b :-> c)
uncurryA = TyFun $\ (a :-> b :-> c) -> ((a :* b) :-> c) swapA = TyFun$ \ (a :* b) -> (b :* a)
lAssocA  = TyFun $\ (a :* (b :* c)) -> ((a :* b) :* c) rAssocA = TyFun$ \ ((a :* b) :* c) -> (a :* (b :* c))
\end{code}
===
* Eros: Stitching together the pieces for rendering inputs & outputs.  I notice that there's a lot of commonality to factor out in forming & dissecting pairs & "copairs". ++++
\begin{code}
class CoPair f where
coFst  :: f a -> f (a,b)
coSnd  :: f b -> f (a,b)

coPair :: Monoid_f f => f a -> f b -> f (a,b)
a coPair b = coFst a mappend_f coSnd b
\end{code}
===
* Wowzers! I have a lot of @Copair@ instances, including partial values and my Eros combinators. ++++
\begin{code}
instance Copair Partial where
cofst = inPartial first
cosnd = inPartial second

instance DeepArrow (~>) => Copair (TArr (~>)) where
cofst = firstT
cosnd = secondT

instance DeepArrow (~>) => Copair (TFun (~>)) where
cofst = funFirstT
cosnd = funSecondT

instance DeepArrow (~>) => Copair (TInp (~>)) where
cofst = inpFirstT
cosnd = inpSecondT
\end{code}
=== The other two Eros combinators (@result@ and @funResultT@) are methods on another class, which I could call "@Cofunc@".  Also, @Partial@ is a @Copair@ instance.
* More ambiguous type class instances. ++++
\begin{code}
-- standard Monoid instance for Applicative applied to Monoid
instance (Applicative (O g f), Monoid a) => Monoid (O g f a) where
{ mempty = pure mempty; mappend = (*>) }

instance Monoid (g (f a)) => Monoid (O g f a) where
{ mempty = O mempty; mappend = inO2 mappend }
\end{code}
===
* Idea: create an automated build system (like "make") that maintains the required build actions //incrementally//.  Do it via change listeners.
* Blog note ++++
The @(***)@ operator is very handy for constructing functions (or other arrow things) that map pairs to pairs.  On functions:
\begin{code}
(***) :: (a -> b) -> (a' -> b') -> ((a,a') -> (b,b'))
f *** g = \ (a,b) -> (f a, g b)
\end{code}
Very handy for function of one argument, but sometimes I find myself wanting to do something similar with (curried) functions of multiple arguments.  An example of that sort of pattern is the @Monoid@ instance for pairs (found in [[Data.Monoid| http://darcs.haskell.org/ghc-6.6/packages/base/Data/Monoid.hs]]):
\begin{code}
instance (Monoid a, Monoid b) => Monoid (a,b) where
mempty = (mempty, mempty)
(a,b) mappend (a',b') = (a mappend a', b mappend b')
\end{code}
To look even more like @(***)@, massage that @mappend@ definition as follows:
\begin{code}
mappend = \ (a,b) (a',b') -> (mappend a a', mappend b b')
\end{code}
Rearranging a bit,
\begin{code}
mappend == \ (a,b) (a',b') -> (mappend a *** mappend b) (a',b')
== \ (a,b) -> (mappend a *** mappend b)
\end{code}
We could go further, but the definitions get murky:
\begin{code}
mappend == \ (a,b) -> (uncurry (***)) (mappend a, mappend b)
== \ (a,b) -> (uncurry (***)) ((mappend *** mappend) (a,b))
== (uncurry (***)) . (mappend *** mappend)
\end{code}

To get a prettier formulation, let's start with a variation on @(***)@:
\begin{code}
($*) :: (a -> b, a' -> b') -> (a,a') -> (b,b') ($*) = uncurry (***)
\end{code}
In other words,
\begin{code}
(f,f') $* (a,a') = (f *** f') (a,a') == (f a, f' a') \end{code} The advantage of @($*)@ over @(***)@ is that it consumes and produces only pairs.  It takes a pair of functions and a pair of arguments and produces a pair of results.  That property lets it cascade nicely where @(***)@ doesn't.

\begin{code}
mappend = \ (a,b) (a',b') -> (mappend a a', mappend b b')
== \ (a,b) (a',b') -> (mappend a, mappend b) $* (a',b') == \ (a,b) (a',b') -> (mappend,mappend)$* (a,b) $* (a',b') == \ p q -> (mappend,mappend)$* p $* q \end{code} So \begin{code} p mappend q = (mappend,mappend)$* p $* q \end{code} === * More ambiguous type class instances. ++++ \begin{code} -- standard Monoid instance for Applicative applied to Monoid instance (Applicative (O g f), Monoid a) => Monoid (O g f a) where { mempty = pure mempty; mappend = (*>) } instance Monoid (g (f a)) => Monoid (O g f a) where { mempty = O mempty; mappend = inO2 mappend } \end{code} === * Idea: create an automated build system (like "make") that maintains the required build actions //incrementally//. Do it via change listeners. * Blog note +++ The @(***)@ operator is very handy for constructing functions (or other arrow things) that map pairs to pairs. On functions: \begin{code} (***) :: (a -> b) -> (a' -> b') -> ((a,a') -> (b',b')) f *** g = \ (a,b) -> (f a, g b) \end{code} Very handy for function of one argument, but sometimes I find myself wanting to do something similar with (curried) functions of multiple arguments. An example of that sort of pattern is the @Monoid@ instance for pairs (found in [[Data.Monoid| http://darcs.haskell.org/ghc-6.6/packages/base/Data/Monoid.hs]]): \begin{code} instance (Monoid a, Monoid b) => Monoid (a,b) where mempty = (mempty, mempty) (a,b) mappend (a',b') = (a mappend a', b mappend b') \end{code} To look even more like @(***)@, massage that @mappend@ definition as follows: \begin{code} mappend = \ (a,b) (a',b') -> (mappend a a', mappend b b') \end{code} Rearranging a bit, \begin{code} mappend == \ (a,b) (a',b') -> (mappend a *** mappend b) (a',b') == \ (a,b) -> (mappend a *** mappend b) \end{code} We could go further, but the definitions get murky: \begin{code} mappend == \ (a,b) -> (uncurry (***)) (mappend a, mappend b) == \ (a,b) -> (uncurry (***)) ((mappend *** mappend) (a,b)) == (uncurry (***)) . 
(mappend *** mappend) \end{code} To get a prettier formulation, let's start with a variation on @(***)@: \begin{code} ($*) :: (a -> b, a' -> b') -> (a,a') -> (b,b')
($*) = uncurry (***) \end{code} In other words, \begin{code} (f,f')$* (a,a') = (f *** f') (a,a')
== (f a, f' a')
\end{code}
The advantage of @($*)@ over @(***)@ is that it consumes and produces only pairs. It takes a pair of functions and a pair of arguments and produces a pair of results. That property lets is cascade nicely where @(***)@ doesn't. \begin{code} mappend = \ (a,b) (a',b') -> (mappend a a', mappend b b') == \ (a,b) (a',b') -> (mappend a, mappend b)$* (a',b')
== \ (a,b) (a',b') -> (mappend,mappend) $* (a,b)$* (a',b')
== \ p q -> (mappend,mappend) $* p$* q
\end{code}
So
\begin{code}
p mappend q = (mappend,mappend) $* p$* q
\end{code}
===
Rewrite to get to the point more quickly.
* I got the pieces type checking for very modular versions of @pair@ for @WGet@ and @WPut@ for Eros. ++++
\begin{code}
type WGet (~>) = Ty :->: (WinIO O ((Event O TInp (~>)) :*: Source))

u :: DeepArrow (~>) => PairTy (WGet (~>))
u = pair

type PutStuff (~>) o =
(Event O TArr (~>) :*: Event O TFun (~>)) :*: (Source O Flip (->) o)

type WPut (~>) o =
(->) (Event (TTArr (~>))) O (Ty :->: (WinIO O PutStuff (~>) o))

v :: (DeepArrow (~>), Monoid o) => PairTy (WPut (~>) o)
v = pair
\end{code}
=== Because I couldn't use a single definition of @Pair@ for type composition, I have things like the following. ++++
\begin{code}
type WinIO = (->) Win O (IO O LaidOut)

instance Pair f => Pair ((->) a O f) where pair = apPair
instance Pair f => Pair (IO     O f) where pair = apPair
instance Pair f => Pair (WinIO  O f) where pair = ppPair

-- Standard instances for (h O f) when f is applicative
instance Pair Source where pair = apPair
instance Monoid_f f => Monoid_f (Source O f) where
{ mempty_f = O (pure mempty_f); mappend_f = inO2 (liftA2 mappend_f) }
instance Pair f => Pair (Source O f) where pair = apPair
\end{code}
===
* Correspondence with Marc Weber, who has Phooey set up in [[Nix| http://nix.cs.uu.nl]], a purely functional package manager.
* Ordered a second GB of RAM for my laptop.
* Bill H installed an air conditioner for our office.  Whew!
* My Eros reformulation is missing the internal handles & handling for pair inputs & outputs.  Pondering how they might fit into the elegant @Pair@ framework.  When do I want to insert something for the pair along with the two halves?  The handle widgets & their layout, the events they generate when selected, and at an output when a compatibly-typed "seed" arrow is chosen.
* Yoiks: [[30 inch LCD monitor with 2560x1600 resolution for $1300| http://www.newegg.com/Product/Product.aspx?Item=N82E16824001098]]. Maybe give Holly my 24 inch monitor. Oh, oops. My laptop's GeForce Go 7700 doesn't seem to drive that resolution. * Idea for a simple, powerful & invisible "make" facility. ++++ * Treat mutable files (e.g., source code modules) as time-varying "sources" and processing tools (compilers, etc) as pure functions. * Use applicative data-driven computation to wire up networks of sources & processors. * Use file change notification to integrate the file system into the data-driven framework. * Laziness is essential, so that recompilation etc happens only when really required. Data-driven propagation of thunks. When accessed & used, the real computation happens. * For this laziness trick to work, I guess the file system would have to cooperate. Could we have a kind of file system that stores thunks and only evaluates them on demand? * Here's a nifty use of the applicative functor interface. Considering the presence of compilers, even tools (code) are sources (variable), so we get to use the @<*>@ operator. When the tool or its input (argument) changes, the result is recomputed. * This idea would eliminate an explicit "make" tool altogether. * I'd probably want a more sophisticated implementation of data-driven computation than I have now. * Some computation could be done speculatively, when there extra cycles are available. * Give the system some clues about when to recompute eagerly, such as opening a source in an editor. * Instead of altering a file system, use an "abstract file system" (AFS), layered on top of a conventional file system (CFS). The AFS may simply be a computation workspace (e.g., Hugs or GHCi top-level sesion). Make bidirectional adaptors between CFS & AFS. For CFS -> AFS, import a conventional, externally-mutable file as a value source. 
For AFS -> CFS, export a value source to a file, so that the file gets written whenever the source changes. * Another layer of sophistication: study possibilities for tracking //incremental// changes and corresponding incremental recomputation. Check out [[Monads for Incremental Computing| http://www.citeulike.org/user/conal/article/1554283]] by Magnus Carlsson and [[Adaptive Functional Programming| http://www.citeulike.org/user/conal/article/1554299]], by Umut Acar, Guy Blelloch and Bob Harper. In any case, both are relevant to my "[[applicative data-driven computation| http://haskell.org/haskellwiki/Applicative_data-driven_programming]" work & paper (in draft). * Improve my data-driven stuff for the case when an input changes but corresponding output (dependent source) doesn't. Could be a simple wrapper @eqCheck :: Eq a => Source a -> Source a@. * Similarly, add a caching/memoizing wrapper @memo :: Hashable a => Source a -> Source a@. === * For my Eros pair-related classes, consider handling @WPut@ differently. Now I use @Source O Flip (->) o@, and I set up instances so that this type constructor is a @Pair@ type. I could instead define a function for for sinks. ++++ \begin{code} opair :: (Functor f, Pair f, Monoid o) => f (a -> o) -> f (b -> o) -> f ((a,b) -> o) fao opair fbo = fmap fo (fao pair fbo) fo :: Monoid o => (a->o, b->o) -> (a,b) -> o fo (ao,bo) (a,b) = ao a mappend bo b \end{code} === But this idea doesn't work out, for two reasons. ++++ * I don't have a @Functor@ instance for @f :->: g@. Tried to fix with a new class. ++++ \begin{code} class BidirFunctor f where bidirfmap :: (a -> b) -> (b -> a) -> f a -> f b instance (Arrow (~>),Functor f, Functor g) => BidirFunctor (Arrw (~>) f g) where bidirfmap h h' = inArrw$ \ fga -> arr (fmap h') >>> fga >>> arr (fmap h)
\end{code}
=== But I don't have an inverse for @fo@.
*  The sink types (@a -> o@, etc) would be propagated in as type arguments, rather than just the domain of the sinks.
===
* I'm enchanted with the data-driven build idea from [[2007-08-11]], including build steps as pure functions and using the applicative functor interface.  I'm wondering how to get recompilations not to get backlogged.  For instance, suppose each "save" (or "checkpoint"), or even each keystroke, triggers a recompilation.  In other words, if recompilations get triggered more frequently than they can get executed.  Note that this same issue arises in window-system programming, for mouse and repaint handling.  Mouse and repaint events are coalesced.  Generalize that trick so that it covers recompilation as well.
* Injured my left foot while working outside this morning.  I'm treating it at home for now.
* Correspondence with Marc Weber.  He's interested in project collaboration.
* Making lovely simplifications in Eros.  I want to release a new TypeCompose, but haddock chokes on it.  TypeCompose is holding up Phooey, in which I just fixed some version skew.  Responded to David Waern, whose last year's Summer-of-Code project was ghc.haddock.
* I think the pieces are now in place for Eros.  Next, tie it into TV.  I guess I'll want to alter TV, which now assumes an arrow interface.
* On IRC chat, [[ClaudiusMaximus| http://claudiusmaximus.goto10.org]] (Claude Heiland-Allen) mentioned that he's doing functional imaging using arrows.  I never thought of that generalization, and it seems a great idea.  He's doing video with feedback, and so ended up sampling & recomputing lots of previous (continuous) frames.  We got to talking, and I remembered my idea of lazy infinite image pyramids (but infinite in extent, so the word "pyramid" is not quite ideal).  And that gave me the idea that such a thing would be another arrow instance.  I love that idea!  BTW, the pyramid would probably be a zipper, so it could be infinite in extent and detail, and efficiently perusable.
* Claude pointed out that I had some bad html showing up in a Pan gallery.  I fixed dozens of (auto-generated) files.
* Tidbit on Haskell optimization ++++>
> And I have a question:  given that I have a Pentium-M cpu that supports SSE2, what are the best flags to pass to ghc to make use of those? Perfect accuracy isn't an issue, I want "as fast as possible".
There's been some discussion of this on haskell-cafe recently.  As I understand it, the relevant options are:
{{{
-fexcess-precision -fvia-C -optc-O2 -optc-mfpmath=sse -optc-msse2 -optc-march=pentium4
}}}
===
* I installed ghc-6.7.20070810-i386-unknown-mingw32.exe.  Though it runs, I don't know how to get other packages installed, including Cabal.  "ghc-pkg list" tells me that I have only {ghc-6.7.20070810}, rts-1.0.  I asked the cvs-ghc list for help.  Meanwhile I backed out to ghc-6.6, so I can keep working.
* Found that 20070802 works, but it seems to have changed the rules about deriving. ++++
Code:
\begin{code}
-- | Pairing for unary type constructors.
newtype Pair1 f g a = Pair1 {unPair1 :: (f a, g a)}
deriving (Eq, Ord, Show)
\end{code}
Error message:
{{{
src/Data/Tupler.hs:26:0:
No instances for (Show (g a), Show (f a))
arising from the 'deriving' clause of a data type declaration
at src/Data/Tupler.hs:(26,0)-(27,25)
Possible fix:
add an instance declaration for (Show (g a), Show (f a))
When deriving the instance for (Show (Pair1 f g a))
}}}
I sent a note to cvs-ghc.
===
* Eros is going great.  It keeps getting simpler & lovelier.  Several pieces along the way that will make interesting blog posts.  What's next? ++++
*  Get haddock.ghc going, so I can make docs again
*  Simplify Phooey as in my "[[Applicative data-driven computation|  http://haskell.org/haskellwiki/Applicative_data-driven_programming]]" draft.  Eliminate the Arrow version. Keep a new Monad version (from the draft) for composing with Source to make the AF version.
*  Change TV to use AF instead of Arrow.  I already have a prototype version in Eros.
*  Map from TV's Input & Output types to Phooey.  Really, just make sure that Phooey implements Pair, Fun, and Title, and there's nothing else to do.   Eliminate the mtl dependence, now that I'm using AF composition instead of monad transformers.
*  Finish Eros, by plugging in some actual widgets.
*  Release it all.
*  Write some blog posts.
===
* Phooey work ++++
*  Cloned the project at 1.2.1.  Made a tag & tarball dist ({{{make darcs-tag darcs dist}}}).
*  Changed version number to 2.0 (major).
*  Changes to {{{phooey.cabal}}} ++++
*   Removed {{{mtl, arrows}}} from {{{Build-Depends}}}
*   Removed modules {{{TagT}}}, {{{LayoutT}}}, {{{Monad}}} and {{{Arrow}}} in {{{Graphics.UI.Phooey}}}.  Also {{{Arrow}}} and {{{Monad}}} in {{{Examples}}}
===
* Removed {{{mtl}}} line from {{{Makefile}}}
===
* Yesterday it occurred to me that my Event representation is a continuation-based computation (e.g., @Control.Monad.Cont@ in the {{{mtl}}} package).  This morning, {{{ski}}} on {{{#haskell}}} said the same thing.  I got intrigued about the connection between events and continuation computations, and how to think about @Cont r@ as a monoid.  Ski said it's "the monoid related to discarding and duplicating the current continuation", which makes a lot of sense to me after a bit of thought.  I decided to make the plunge and use @Cont@ directly for the general case, and define @Event = Cont (IO ())@ as a special case.  Most of my gestural composition stuff generalizes nicely, and introduced classes @InputHandle@ and @OutputHandle@ with @IO ()@ instances.  Hm.  Doesn't really let me abstract over differences between underlying GUI libs, however, since they'd probably all use @IO ()@.
* I've moved more stuff from Eros into TypeCompose.  Now I split off a new package "DataDriven".
* ghc-6.7 has broken some of my "deriving" clauses.  I sent a note asking if something has changed and what can be done.  Waiting.
* Misc to do on my Haskell projects: ++++
*  Rename Graphics.UI.Eros.Render to Graphics.UI.Eros.  [done]
*  Rename Graphics.UI.Phooey.Applicative to Graphics.UI.Phooey.  [done]
*  Update TV to work with AFs instead of arrows.  Move Input & Output from Eros.
*  In DeepArrow, use @:*:@ instead of @Prod1@.
*  GuiTV.  Still necessary??
*  Work out how to share widgets between Phooey & Eros.  Maybe a wrapper function that turns a Phooey UI into
*  How could I make Eros, and maybe Phooey, more restrictive, so that widgets cannot hide state (mumble).
*  Do something with Eros's Partial, PartialTy.
*  Check projects for any darcs-missing files.  See [[darcs tips]]
*  Unix mode
*  Make list of blog posts and start writing.
===
* Afterthought from [[#haskell NVC chat| http://tunes.org/~nef/logs/haskell/07.08.15]]: when people struggle with NVC language, trying to "talk around" what they really think, it's because the language and the thinking don't match.  When the thinking transforms to match the language, the talking becomes fluid.  The same is true with imperative programmers who've recently learned the Haskell language, but not yet the thinking of pure functional programming.  Shared these thoughts and had a [[#haskell-blah chat on NVC| http://conal.net/misc/2007-08-17-haskell-blah.html]].
* Reworking TV.   ++++
*  Switching from arrows to AFs.  I think I need two type constructor args, @dom@ and @ran@. ++++
\begin{code}
-- | An /Output/ describes a way to present a functional value, perhaps
-- interactively.  It is the user-interface half of a tangible value.
data Output dom ran :: * -> * where
-- | Output primitive
OPrim :: ran a -> Output dom ran a
-- | Visualize a function.  Akin to /lambda/
OLambda :: Input dom  a -> Output dom ran b -> Output dom ran (a->b)
-- | Visualize a pair
OPair :: Output dom ran a -> Output dom ran b -> Output dom ran (a,b)
-- | Title/label an output
OTitle :: String -> Output dom ran a -> Output dom ran a

type TV dom ran = Output dom ran :*: Id
\end{code}
===
* How to run a TV?  What to do with an @Output dom ran a@ and an @a@?  Maybe a class. ++++
\begin{code}
type OI a = a -> IO ()

class ToOI ran where toOI :: ran a -> OI a
\end{code}
===
===
* Pondering different Monoid instances for Maybe. ++++
\begin{code}
instance Monoid (Maybe a) where
mempty = Nothing
Nothing mappend b = b
a       mappend _ = a
\end{code}
=== vs ++++
\begin{code}
instance Monoid a => Monoid (Maybe a) where
mempty           = Nothing
mb mappend mb' = Just (unMaybe mb mappend unMaybe mb')

unMaybe :: Monoid a => Maybe a -> a
unMaybe (Just a) = a
unMaybe Nothing  = mempty
\end{code}
=== The first one is more flexible, while the second is more informative.  See the [[hpaste session| http://hpaste.org/2319]]
* Mark Wassell is [[extending Phooey| http://www.haskell.org/haskellwiki/Extending_Phooey]] for his [[HGene| http://www.haskell.org/haskellwiki/HGene]] project.  He's done a very clever thing, introducing feedback for "persistence", as needed to make an editor.  Ponder feedback, including how to formulate and implement for AFs.
* TODO: rename "dom" & "ran" to "src" & "snk".
* Will Phooey fit into my new TV framework?  @src == UI@, but what's "snk"?  Maybe @UI O OI@.  (Or maybe more general: @UI O Flip (->) o@ for an arbitrary monoid @o@.)  But then I cannot use the AF application combinator @<*>@ to apply an output to an input.  Hm.  I'd hoped to have just a @src@ parameter, and use @src (a -> o)@ instead of @snk a@, i.e., without the @Flip@.  That pattern works for @IO@ and for Phooey @UI@, but not for Eros, which accumulates different information for inputs vs outputs.
* Hm. Is it possible to eliminate the input/output distinction?  Maybe have only inputs, representing outputs as @Input (a -> o)@?  Add a new type class @OPair@ to augment @Pair@.  Maybe eliminate @Copair@.  Maybe @OPair@ is easily definable without a class.  If this idea can work, I think I'd have to change Eros to collect both input & output information all the time.  Unless there's a unification I haven't seen in the Eros theory.
* Re-reading my Eros paper for inspiration for unification.  Thoughts ++++
*  An |alpha| output is really an |alpha -> o| input.
*  Inputs can be edited.
*  An Eros visualization/output is an alternative view on an |alpha -> o| input.  Flip it over to the other side to see it as an input.
*  Editing that input means providing another way to //visualize// the value, because |alpha -> o| is the visualization function.  For instance, |o == String| or |o == ImageC|.
*  For instance, that's how the user says to change the bounds on a slider.  Or to change from a standard function view to an image view.
===
* Hey -- I thought my @Event@ type was a functor, but //not// an AF.  I just realized that @Event == Cont@ and @Cont@ is a monad and therefore an AF.  What gives? ++++
\begin{code}
newtype Cont r a = Cont { runCont :: (a -> r) -> r }

return a = Cont ($a) m >>= k = Cont$ \c -> runCont m $\a -> runCont (k a) c \end{code} Dropping the @newtype@ wrappers, \begin{code} type Cont r a = (a -> r) -> r return a c = c a (m >>= k) c = m (\a -> (k a) c) \end{code} For AF, we have @pure = return@ and @<*> = ap@. For @pure@, I get exactly one invocation of the listener, when it's installed. What about @<*>@? \begin{code} (fab <*> fa) c = (liftM ($) fab fa) c

liftM ($) fab fa = do { ab <- fab ; a <- fa ; return (ab a) } == fab >>= \ ab -> fa >>= \ a -> return (ab a) == \ c -> (fab >>= \ ab -> fa >>= \ a -> return (ab a)) c == \ c -> fab (\ ab -> (fa >>= \ a -> return (ab a)) c) == \ c -> fab (\ ab -> fa (\ a -> return (ab a) c)) == \ c -> fab (\ ab -> fa (\ a -> c (ab a))) \end{code} which of course is what I'd expect for function application in a continuation semantics. What I think this means is that //whenever// @fab@ occurs, its listener adds a new subscriber to @fa@. The apn's continuation @c@ therefore gets invoked on the cross product of the occurrences of @fab@ and @fa@. This semantics is like @[]@ (backtracking), while I want something more like the @ZipList@ instance of @Applicative@. That's what @Applicative@ on @Source@ does. === * Made arrangements for Germany trip ++++ * Arranged to stay at the [[Black Forest Hostel| http://www.blackforest-hostel.de]] 25.Sept-08.Oct (12 days), in a six-bed dorm. I'll probably have to change rooms one of the nights. Must reconfirm three days before (22 Sept, on Tasha's wedding day) -- 011 _49 761 / 881 78 70. * Changed my flight to return on Oct 8 (added$190).
*  Registered for ICFP, Haskell Workshop, and banquet (460 Euro).
*  Registered for IFL.  Cost is 75 Euro, due Sept 12 by bank transfer.  My bank charges, I think, but maybe Joseph could help.
===
* My Phooey users I've heard from so far are using the monadic interface, which I'm in the process of removing.  Keep it instead, with Applicative being a simple layering.  Oh -- wait.  I want to define my applicative UI from simple applicative pieces.  I forgot that I wouldn't get a Monad layer for free.  Hm.
* Mads presented a problem  on the [[Phooey Talk page| http://www.haskell.org/haskellwiki/talk:phooey]].  His example that presents subtotals and a total.  I wrote a response on that page.  Here's my reformulation. ++++
\begin{code}
basket = title "Shopping List" $do f <- fruit showFruit <- title "Fruit"$ showDisplay f
t          <- tools
showTools  <- title "Tools"  $showDisplay t showBasket <- title "Basket"$ showDisplay $liftA2 (+) f t return$
showFruit mappend showTools mappend showBasket
\end{code}
=== and another one ++++
\begin{code}
-- Display and pass along an "intermediate result", following an idea of Mads.
ir :: Show a => String -> UI (Source a) -> UI (Source a, Source ())
ir str ui = do x <- ui
showx <- title str $showDisplay x return (x,showx) basket :: UI (Source ()) basket = title "Shopping List"$
do (f,showFruit) <- ir "Fruit" fruit
(t,showTools) <- ir "Tools" tools
showBasket <- title "Basket" $showDisplay$ liftA2 (+) f t
return $showFruit mappend showTools mappend showBasket \end{code} === * The example got me thinking. I don't know how to use a value twice in the applicative interface. I guess there has to be an explicit dup (as arrows): @pure dup :: UI (a -> (a,a))@. * More thoughts on Mads's "basket" example from yesterday ++++ * Note that by swapping @(f,showFruit)@ to @(showFruit,f)@ and similarly with tools, we get what looks like uses of the writer monad (implicit @mappend@). If I add a @Writer (UI Source)@ to the @UI monad@, the example might come out looking like this. ++++ \begin{code} basket :: UI () basket = title "Shopping List"$
do f <- ir "Fruit" fruit
t <- ir "Tools" tools
title "Basket" $showDisplay$ liftA2 (+) f t
\end{code}
=== The return type has no @Source ()@, which would be implicitly included in @UI@.
*  Since each source is used just once, this version may lend itself well to an AF formulation. ++++
\begin{code}
basket = title "Shopping List" $title "Basket"$ showDisplay <**>
liftA2 (+) (ir "Fruit" fruit) (ir "Tools" tools)
\end{code}
===
* What can we do with @ir@'s definition?  New monadic version: ++++
\begin{code}
ir :: Show a => String -> UI (Source a) -> UI (Source a)
ir str ui = do x <- ui
title str $showDisplay x return x \end{code} === Messing about ++++ \begin{code} (m >>= \ x -> g x >> return x) == (m >>= g mappend return) \end{code} === So, ++++ \begin{code} ir str ui = (title str . showDisplay) mappend return \end{code} === === * I want to address recursion in AF Phooey. Mark Wassell has a [[nice use| http://www.haskell.org/haskellwiki/Extending_Phooey]] for a table of info. Mads Lindstroem has some [[other uses in mind| http://www.haskell.org/haskellwiki/Talk:Phooey]. Pick something simple: an event that increments a number. Since sources are implicit, model events as Maybe-valued sources. Monadic version: ++++ \begin{code} counter = do b <- title "press me"$ button 0 1
let s = liftA2 (+) s incr
title "count" $showDisplay s \end{code} === where @button@ makes a button UI with the given values for not pressed & pressed. Hm. Really a recursive source, not a recursive UI. Play with it anyway. Raises interesting question of recursively defined sources. What terminates updating? Checking for equality. Can't be built-in, since not all types are @Eq@. Also, there's nothing that says to start at zero. * Wait a minute! Counting doesn't have to be recursive. ++++ \begin{code} counter = do e <- title "press me"$ button
count <- eAccum 0 (fmap (const (+1)) e)
title "count" $showDisplay count \end{code} === We can hide the event altogether: ++++ \begin{code} buttonAccum :: a -> (a -> a) -> UI (Source a) buttonAccum a f = do e <- button eAccum a (fmap (const f) e) counter = do count <- title "press me"$ buttonAccum 0 (+1)
title "count" $showDisplay count \end{code} === In AF form, ++++ \begin{code} counter = title "count" showDisplay <**> title "press me"$ buttonAccum 0 (+1)
\end{code}
===
* Since AFs hide sources, every UI has to be a single source.  If I expose a "button" UI element, it has to be a source rather than an event.  So, I use the most general form I know for converting an event into a source, namely @eAccum@.
* "Non-coercive communication" is a clearer description than "NVC".  Look for a positive replacement for "non-coercive" and a broader replacement for "communication".
* Return to reworking TV, started on [[2007-08-17]].  I gave @Output@ two (type constructor) arguments: @dom@ and @ran@, e.g., Phooey's @UI@ and @UI O OI@.  Now I have a problem: the @OI@ constructor prevents applying the basic AF application operators @(<*>)@ and (its reversal) @(<**>)@.  Some choices: ++++
*  Add operators @(#)@ and @(##)@ that simply strip off the @OI@ and apply the standard operators.  Use those instead of the standard ones.
*  Use the non-@OI@ types and make an @OI@ adaptor for use with TV.
*  Try replacing @src@ and @snk@ as TV type arguments with @f@ and @o@, where the output type is @f (b -> o)@.  Figure out how to fit Eros into this model, since Eros now has rather different type constructors for input & output.  I don't know how to make that unification work.
===
* Compiling wxHaskell for ghc-6.7. ++++
*  Copied some {{{include/wx}}} files.
*  Copied {{{include/gmp.h}}} from an older ghc-6.7.  Missing in the one I'm running.
*  In {{{makefile}}}, removed -fvia-C from {{{WXCORE-HCFLAGS}}} & {{{WX-HCFLAGS}}} and removed {{{-fPIC}}} from {{{WXC-CXXFLAGS}}}.  The former got around a bug in ghc-asm.  The latter eliminated lots of warning messages.
===
* Got IO and (applicative) Phooey UI working with new TV.  Next is Eros!  How to introduce widgets to Eros?  Leverage Phooey.
* Putting pieces together to make a new Eros top level.
* Changed my @TTArr@ type.  It contained a source type and a matching @TArr@.  Now it just has a function from source types to @TTArr@s.  Works out very well.  Simpler and more general than before.  Amenable to polymorphic values.
* Hm.  Eros @WGet (~>)@ is not a functor, and @WPut (~>)@ is not a cofunctor. ++++
*  I used functor/cofunctor to define @Read@ inputs & @Show@ outputs.
*  The reason @WGet (~>)@ isn't a functor is that it wants type information.
*  Idea: define classes similar to Functor & Cofunctor that includes type transformations: ++++
\begin{code}
class FunctorTy f where
fmapTy :: (a TyFun b) -> (a -> b) -> f a -> f b
\end{code}
=== Hm.  Note similarity with @TArr@: ++++
\begin{code}
data TArr (~>) a = forall a'. TArr (a TyFun a') (a ~> a')
\end{code}
===
===
* More & more pieces coming together & simplifying in the new Eros.
* I don't think there's anything special about TVs, and very little of the system knows about them.  My current definitions: ++++
\begin{code}
-- | Tangible values (TVs).
type TV src snk = Output src snk :*: Id

-- | Arrow on 'TV's
type TVFun src snk = OFun src snk ::*:: (->)
\end{code}
=== I think the important bit is that @::*::@ lifts deep arrows.  I've been keeping a lot of generality in type signatures.  There's probably more to be gotten.
* I think I can improve on my type of dynamically polymorphic arrows (arrow with computed result type).  Here's some code (type-checks): ++++
\begin{code}
{-# OPTIONS -fglasgow-exts #-}

import Control.Arrow
import Control.Arrow.DeepArrow

import Control.Compose ((::*::))

import Data.Ty

-- Simplify & generalize the representation of arrows with computed result
-- types.  Currently:
--
--   data TArr (~>) a = forall a'. TArr (a TyFun a') (a ~> a')
--
--   firstT  :: Arrow     (~>) => TArrX (~>) a (a,b)
--   firstT  (TArr ta_a' a_a') = TArr (first  ta_a') (first  a_a')

--  The new idea is to break @TArr@ into two pieces: @TyFun ::*:: (~>)@
-- hiding result parameter.  First hiding of type arguments.

-- | Hide the type parameter of @g@ (which may be a partial application)
data All g = forall b. All { unAll :: g b }

-- | Transformations on 'All' types
type AllX ar u v = All (ar u) -> All (ar v)

-- Now specialize to Arrow & DeepArrow:

-- | Transformations on 'Arrow' 'All' types
type AllXA u v = forall ar. Arrow ar     => AllX ar u v
-- | Transformations on 'DeepArrow' 'All' types
type AllXD u v = forall ar. DeepArrow ar => AllX ar u v

firstT' :: AllXA a (a,b)
firstT'  (All p) = All (first p)

resultT' :: AllXD b (a->b)
resultT'  (All p) = All (result p)

-- Finally, combine with computed result type:

-- | Binary type constructor with computed hidden second parameter.
type TArr (~>) a = All ((TyFun ::*:: (~>)) a)
\end{code}
===
* What about @TTArr@ (both parameters hidden)?  Currently, ++++
\begin{code}
data TTArr (~>) = TTArr (forall a. Ty a -> Maybe (TArr (~>) a))
\end{code}
===  This definition could generalize as well.  An isomorphic variation: ++++
\begin{code}
type TTArr (~>) = All (Ty :-> Maybe O TArr (~>))
\end{code}
=== Though not quite, since @TArr@ is a partially applied type synonym, which GHC spurns, to preserve first-order matching.
* Idea for more convenient GADT-based term rewriting: Add a typed @Literal@ type with a bunch of known constants and a catch-all. ++++
\begin{code}
data Literal :: * -> * where
Literal    :: a ->                    Literal a
Fst        ::                         Literal ((a,b) -> a)
Plus       :: forall a. Num a =>      Literal (a -> a -> a)
Pi         :: forall a. Floating a => Literal a
Sin        :: forall a. Floating a => Literal (a -> a)
...
\end{code}
=== Then I think I can write simple, typed simplifiers.  Try it out.
* It's a hassle to carry around the arrow type parameters.  And in the end, Eros really works with arrow-universal things.  So I gave a try at making my transformation embedders, function extractors, and input extractors (@TArr@, @TFun@, and @TInp@) be explicitly universal over DeepArrow types.  The code gets much quieter. ++++
\begin{code}
-- 'DeepArrow'-universal arrow
newtype a :~>: b = UD (forall (~>). DeepArrow (~>) => a ~> b)

instance Arrow (:~>:) where
  arr f         = UD $ arr f
  UD f >>> UD g = UD $ f >>> g
  first  (UD f) = UD $ first f
  second (UD f) = UD $ second f
  UD f *** UD g = UD $ f *** g
  UD f &&& UD g = UD $ f &&& g

instance DeepArrow (:~>:) where
result (UD f) = UD $result f idA = UD idA fstA = UD fstA dupA = UD dupA sndA = UD sndA funF = UD funF funS = UD funS funR = UD funR curryA = UD curryA uncurryA = UD uncurryA swapA = UD swapA lAssocA = UD lAssocA rAssocA = UD rAssocA -- I'd love to use these functions above, but I don't know how to get them -- to type-chedk. -- -- onUD h (UD f) = UD (h f) -- onUD2 h (UD f) (UD g) = UD (h f) (h g) -- | Arrow with computed result type data TArr a = forall a'. TArr (a TyFun a') (a :~>: a') \end{code} === Combining with my previous idea: ++++ \begin{code} type TArr a = All ((TyFun ::*:: (:~>:)) a) \end{code} === * Hey! Replace my arrow newtypes @TyFun@, @OFun@, etc, with a single @Fun@: ++++ \begin{code} newtype FunA f a b = FunA (f a -> f b) \end{code} === Then make a single @Arrow@ and a single @FunArr@ instance. Oh, wait. @Arrow@ will be tricky. See @OFun@ & @Ty@ for examples. What about @FunArr@? Oh, and make a new type-class to handle @Arrow@. * More thoughts on the data-driven build idea mentioned on [[2007-08-11]] and [[2007-08-13]]: ++++ * "Make" systems rely on purely functional build steps. Otherwise, they couldn't know when to rebuild. * Represent each build result as a pair of (a) output, and (b) possible success value. ++++ \begin{code} type MakeG o = (,) o O Maybe type Make = MakeG String \end{code} === When @o@ is a @Monoid@, @MakeG@ is an AF. Build messages accumulate in the @o@ part (e.g., @String@ in @MakeG@). Application (@<*>@) combines messages in any case (via @(,) o@) and combines successes (via @Maybe@), failing (@Nothing@) if function or argument fails. Nice! Note that @Nothing/Just@ replaces the Unix tradition of the nonzero or zero return code. * Unlike a conventional "make", there's no way to get the rules wrong. Dependencies are determined automatically. * Static typing: executables are typed functions, and other data are typed first-order values. * Implementation question: how to wrap up OS executables as pure functions? 
If they work with stdin/stdout, then play with pipes, @unsafePerformIO@, and @read/show@. * Compilers are pretty file-oriented. What to do? * C-like languages have the {{{#include}}} directive, which implies a compile dependency. Some tools automatically extract those dependencies and place them in a makefile. Instead, describe the program as an explicit application of a parameterized program to those include files. When an include file changes, the application changes and so gets recompiled. When an included file {{{#include}}}s another, there's another explicit application. The applications could simple be concatenations. * By decomposing processors (e.g., compilers), we may be able to save some work, re-using earlier results when an intermediate value stays the same. For instance, if {{{cpp}}} strips out comments, and someone just tweaks a comment, all post-cpp phases are avoided. * By decomposing input data (e.g., source code), we may be able to skip a lot of processing. For instance, make each top-level definition be its own component, composed together. When one definition changes, just recompile it. Depends a lot on how the compiler works. * I think these last two tricks are what [[Adaptive functional programming| http://www.citeulike.org/user/conal/article/1554299]] is about. See also [[Monads for incremental computing| http://www.citeulike.org/user/conal/article/1554283]] (done better as an AF). * Work out scheduling so that invalidation happens eagerly but actual builds lazily. May be simple. Example: suppose a computed value is to get displayed in a widget or written to a file. If the change listener makes the foreign call, the computed result will get fully evaluated during marshalling. In the case of a widget, there could be several values written before an actual repaint, depending on repaint scheduling, and a lot of work could be wasted. Instead, just schedule the thunk in an "update map", keyed by the widget & property. 
In the idle loop or a second thread, execute and empty the update map. If an entry gets updated twice between executions, the earlier updates are discarded and their computations don't happen. Maybe use STM. * Example: every keystroke, or every save. * Lazy output widgets ... scrolling triggers evaluation. Maybe by line. Cf {{{Data.ByteString}}}. === * Thinking again about an old dream for graphics rendering: a lazy, data-rich representation of infinite and continuous images. ++++ * Infinite image pyramids. First idea: each node has one "block" (pixel or array of pixels) and four sub-pyramids for higher resolution. * Note there's significant overlap between levels. Second idea: each level has a stream of progressively more accurate blocks, all of the same resolution. The first block is a straight sampling (once per pixel). The rest are formed by element-wise averaging the level's stream elements (starting with the sampling one) with each of the four sub-streams. * Next idea: have //bi-infinite// approximation streams. Besides refining, have a progressively coarsening stream, built out of quarters of the ancenstor head approximation and its coarsenings. For instance, if I'm am the top-left child of my parent, I extract & coarsen the top-left quarter of my parent's approximation. * What about infinite extent? Have the "pyramid" also be bi-infinite, i.e., infinitely up (zoom out) and infinitely down (zoom in). What's a bi-infinite tree? I don't know, but I think it has the same zipper type as a conventional infinite tree. * Optimize rendering for a particular block and resolution. The block lets us cull out "geometry", and the resolution lets us set level of detail. These two adaptations counter-balance -- more work in one means less in the other. * Do 3D with @Image (Point,Color)@. * How to apply spatial transformations to our representation? What resolution in for what resolution out? Depends on the properties of the spatial transformation. 
Idea: do IA to bound magnification/minification. * For animated and/or interactive imagery, there are arbitrarily many dimensions to the problem, rather than just two. Do a similar pyramid-zipper for these parameters as well. Look for a general scheme for representing functions, including arbitrarily curried ones. May be something like a generalized trie. To represent a function @f :: a -> b@, make a pyramid zipper indexed by @a@, holding approximations for @b@ values over regions. If @b@ is a function also, then its approximations are represented via another pyramid zipper. First work out for @a == R@, and then generalize. Handle product domains as in generalized tries, using the currying isomorphism. === * Yesterday I did a bunch of clever stuff with "universal arrows", but it didn't quite work out in practice. The problem: when I get a function extractor (ultimately from selecting on input in some lambda), I can "apply" it to the containing TV, to get a function-valued TV, but then I can't re-universalize that TV. * I have a new Eros working! As usual, it took a long time to get it to type-check, and then it mostly worked. Issues: ++++ * Widgets don't stretch [fixed] * There are no default values for Read inputs [fixed] * Type-compatibility highlighting works for ints & functions, but not bools. The logic works the color-set call happens, but with no visible result. Hm. A problem with the checkBox widget, which don't set the background color even when given at initialization. * Make some sexy examples. * I don't know how someone can tell what's going on. There's no feedback saying what the TVs are, other than interacting with them. * Input state is forgotten during gestural composition, as before. Think about this one. * Consider drag & drop. * Outputs can be selected and edited. * Make TV inputs/output stop listening to events when they go away. I don't yet have a mechanism or interface for //removing// a listener. 
Idea: Have @subscribe@ take a listener and a stop-listening event. Pass stop-listening down during UI construction. === * Nice trick picked up from Saizan on {{{#haskell}}}: Start with @join :: Monoid m => m (m b) -> m b@ and specialize to functions: @join :: (a -> a -> b) -> (a -> b)@. So @join f@ applies @f@ to the same argument twice. A fun use: @join (liftA2 (,)) [0..3]@ produces the cross product of a list with itself. Here's another, from Cale ++++ {{{ Cale: > map (ap (,) (^2)) [1..10] lambdabot: [(1,1),(2,4),(3,9),(4,16),(5,25),(6,36),(7,49),(8,64),(9,81),(10,100)] ddarius: :t ap (,) lambdabot: forall a b. (a -> b) -> a -> (a, b) }}} === What about binary operators other than @(,)@, e.g., @(:)@ or @(+)@? More great stuff at the [[Blow your mind| http://www.haskell.org/haskellwiki/Blow_your_mind]] wiki page. * Idea: Sorting & parsing are offline algorithms, having the property that no output can be produced until all input is consumed. For parsing, reasons include errors and operator pair precedence. Still, a lot of useful results //are// ready long before the input is consumed. I bet there's a useful perspective shift that would help. Decompose such functions in a way that the intermediate representation has intermediate info available early. * Eros thoughts ++++ * Reverse the order of input/output selection, so that the user chooses the output before the input. Then the experience is more like conventional interactive apps. For instance, one selects some text and clicks on ''bold''. Better yet -- allow both orders. * For a more familiar experience, make the new TV //replace// the TV that contained the output. Maybe also replace the one that contained the input. Oh -- allow TVs to be "pinned" or unpinned. An unpinned TV disappears after it's used in a composition. * Let users dynamically alter UIs: title add/change/remove, change slider bounds. 
Maybe change the widget type altogether, though that change could also be done with a handful of identity-function TVs. === * Idea for addressing latency in a web browser: if a page (picture, etc) is in the cache, then display it immediately, without checking for a newer version. Then check for a newer version and update the displayed page. Further: compute an edit that modifies the shown page, drawing attention to the changes. Maybe a mode that highlights the revisions in some way. Could show cross-outs & insertions. * The [[Amazon Honor System| http://zme.amazon.com]] makes it easy for people to donate money in appreciation of web content & services. Anyone with an Amazon account can donate$1 or more.  Amazon's cut is 2.9% plus 30 cents per transaction.
* [[Haskell for pluggable apps, with the GHC API| http://programming.reddit.com/goto?id=2izu4]]
* Eros ++++
*  Added image output.  :) !
*  Adding vector input (spatial) for translation & scaling.  Getting deeper into some wxHaskell.  Tried to make a "vector" attribute by mapping over the @on mouse@ attribute.  Doesn't really work.  I want a //filtered// version of @on mouse@.  Oh!  Use my own event algebra, where I have filtering etc.
===
* Use [[markdown| http://daringfireball.net/projects/markdown]] to write things like {{{TODO}}} and {{{CHANGES}}} files.  Use [[PanDoc| http://sophos.berkeley.edu/macfarlane/pandoc]] to convert from markdown to html, pdf (via latex), etc.  PanDoc has some handy features: smart quotes, dashes, and ellipses, auto-gen'd TOC, ASCIIMathML, footnotes, & more.  I installed PanDoc and converted over my Eros TODO.  To generate {{{TODO.html}}}, just {{{pandoc --toc --smart --standalone -o TODO.html TODO}}}.  Works great!
* Found a [[markdown mode| http://jrblevin.freeshell.org/software/markdown-mode]].  Nice highlighting, but lacks paragraph fill support.  Some bugs.  I fixed some but not all.  Emailed the author.
* Read quickly through Wouter Swierstra's [[Data Types a la Carte| http://www.cs.nott.ac.uk/~wss/Publications/DataTypesALaCarte.pdf]].  Charming & clever solution to "the expression problem".
* Emacs hacking to streamline my Haskell hacking workflow. ++++
*  In a source file, {{{f10}}} loads the module into ghci, after cd'ing to the ancestor {{{source}}} directory.
*  In a source file, {{{f11}}} does a {{{make cabal-install}}}, in the ancestor {{{source}}} directory.
*  {{{\C-c\C-j}}} appends the current region to a file in the local called {{{Junk}}} plus the extension of the current buffer.  Use it just before a big modification or deletion.
*  {{{f9}}} does {{{next-error}}} (more convenient for repeated use than {{{M-}}}).
===
* Good progress simplifying my window event stuff.  Will make a nice separate library, between @DataDriven@ and @Phooey@ or @Eros@.
* Puzzling over my image output.  I wrote it such that changing the image changes the {{{on paint}}} method.  Makes sense, but it forces me to resample on every repaint.  Instead, I'd like to compute the sample array as a function of image and window size (in pixels), so whenever either changes, the array would update.  Nice & simple.  The catch: the output panel is at once an output for images and an input for sizes.  Hmm.  Think of it as an output for functions from size to images.  No -- functions from size to arrays (of that size).  Oh!  it's a //source of// image sinks.  In fact, //all// of my outputs are sources of sinks.  This is the only single-widget output that produces a non-static source of sinks.  Yes -- this insight worked out very well.  I separated my image output into an output of discrete (& finite) "images" and a pure function from window size & continuous image to discrete image.  Repainting is super fast, because it's re-using a pre-rendered discrete image.
* Next: figure out why initial size is narrower than I'd expect.  (Probably because the automatic resize during initial layout is missed, and I'm just using the requested size.)  Also, why aren't the sliders or handles stretching?
* Crashing bug: if a control is closed and then another one is opened, Eros crashes.  I suspect the crash comes from setting some state that's been reallocated.
* How to get event listeners to be removed?  In particular, when a control is closed, make sure its listeners go away.  How?  I think I'll have to change the @Event@ representation & interface. ++++
\begin{code}
-- | Event listener.  Consumer and quit event
data ListenerG o a = L (a -> o) (o -> o)

-- | Event, general form.  A means of consuming listeners.
newtype EventG o a = EventG (ListenerG o a -> o)

-- | Listeners specialized to IO
type Listener = ListenerG Action
-- | Events specialized to IO
type Event = EventG Action

subscribe :: (a -> o) -> (o -> o) -> EventG o a -> o
subscribe sink quit (EventG f) = f (L sink quit)
\end{code}
===
* Hmm.  Maybe there's only one problem spot: highlighting of candidates for transformation.  I've worked around this problem for now by preventing destruction of any frames.  They get hidden instead of closed.  Look for a better solution.
* Cheering graphic, seen on #haskell: {{{\o| \o/ |o/}}}.
* Hm: I just remembered about my [[idea| using TiddlyWiki to view programs]] for converting Haskell programs into tiddlywikis.
* Here's an approach to "limited listeners": have the @subscribe@ operation return an @unsubscribe@ action. ++++
Currently
\begin{code}
type News o m = m o -> m o
\end{code}
The new definition:
\begin{code}
type News o m = m o -> m (m o)
\end{code}
On the RHS, the first @m@ is the //callback//, the second is the //subscription// action, and the third is the //unsubscription// action.

Now, does @mappend@ do what we want?  Assuming @m@ is an AF (probably a monad),
\begin{code}
n `mappend` n' == \ cb -> n cb `mappend` n' cb
               == \ cb -> liftA2 mappend (n cb) (n' cb)
\end{code}
When @m@ is a monad and @o == ()@, @liftA2@ on @m@ is @liftM2@, and @mappend@ on @m o@ is @(>>)@, so
\begin{code}
n `mappend` n' == \ cb -> liftM2 (>>) (n cb) (n' cb)
               == \ cb -> do unsub  <- n  cb
                             unsub' <- n' cb
                             return (unsub >> unsub')
\end{code}
Perfect!  Unsubscribers combine automatically.

\begin{code}
mempty == pure (pure (pure mempty))
== const (return (return ()))
\end{code}

It remains to construct primitive @News@ values.  How to handle subscription and unsubscription?  Here's my current primitive builder:
\begin{code}
mkNews :: (Monoid (m o), RefMonad m r, Monoid o) =>
(m o -> m o) -> m (m o -> m ())
mkNews setNotify =
do  ref <- newRef mempty              -- holds subscribed actions
setNotify (join (readRef ref))    -- execute combined actions

addL :: r (m o) -> (m o -> m ())
addL ref = \ cb -> modifyRef ref (mappend cb)
\end{code}
More tightly,
\begin{code}
addL ref = modifyRef ref . flip mappend
\end{code}

This version of @mkNews@ stores an //action// in a ref.  Accumulation happens through @mappend@ (typically @(>>)@).  How to selectively remove actions from the middle of a @mappend@ chain?  Add a bit of indirection, via another ref.  Initialize that new ref to contain the subscribed action, and change to @mempty@ on unsubscription.
\begin{code}
addL' :: r (m o) -> (m o -> m (m ()))
addL' ref = \ cb -> do ref'   <- newRef cb
                       modifyRef ref (`mappend` join (readRef ref'))
                       return (writeRef ref' mempty)
\end{code}
===
* Reading through {{{Data.Source}}}, I have my doubts about the following function. ++++
\begin{code}
-- | Create a source from an initial value and a update-function event.
-- Specialized type:  mkAccumE :: a -> Event (a->a) -> IO (Source a)
mkAccumE :: a -> Cont (cur o) (a -> a) -> cur (SourceG (cur o -> cur o) f a)
mkAccumE a (Cont kk) =
do ref <- newRef a
let news act = kk (\ f -> do modifyRef ref f
act)
\end{code}
The installed event listener is not idempotent, as it incrementally modifies @ref@.  For example, construct
\begin{code}
do s <- mkAccumE 0 (fmap (const (+1)) click)
return (liftA2 (+) s s)
\end{code}
The resulting summing source has as its news, @news mappend news@, where @news@ is as made by @mkAccumE@.  Therefore, in response to @click@, the ref gets incremented twice.  Conversely, suppose the source @mkAccumE@ source gets made, then several clicks happen, and //then// someone starts using the source.  It won't have been accumulating.  Idea: separate out the ref updating from the news.
\begin{code}
mkAccumE a (Cont kk) =
do ref <- newRef a
kk (modifyRef ref)
return (O (\ act -> kk (const act), readRef ref))
\end{code}
I think it's crucial here that event listeners are executed in the order in which they were registered.  Is that the case?  Oops!  It wasn't, but is now.  (Lesson: watch for excessive cleverness in tersifying code.)
===
* How would the registered accumulating listener ever get removed? +++
Have subscription also return an unsubscribe action.  I could add a finalizer on the ref to stop the accumulation.
\begin{code}
mkAccumE a (Cont kk) =
do ref <- newRef a
kk (modifyRef ref) >>= addFinalizer ref
return (O (\ act -> kk (const act) , readRef ref))
\end{code}
Oh -- oops!  The finalizer will never get executed because of the (strong) pointer inside the listener.  Solution: use a weak ref in the listeners.  Historical aside:
I guess it's no coincidence that weak references are the ticket.  Data-driven evaluation for Fran is what motivated me to collaborate with Simons PJ & Marlow on what became [[Stretching the storage manager: weak pointers and stable names in Haskell| http://citeseer.ist.psu.edu/peytonjones99stretching.html]].  Now I can finally write a paper about GC vs data-driven computation, which was my contribution to "Stretching" but got cut out of the final version.
Idea: have events save only //weak pointers// to their listeners.  On each event occurrence, dereference the weak pointers and invoke the still-living listeners.  Do the same for the "news" part of sources.  Some possibilities & questions:
*  Can I hide the weak ref & addFinalizer stuff altogether?  Try!  See below.
*  Finalizers for Eros windows?
*  Weak refs & AFs.
*  Look for a trick for short-cutting past dead weak references, so they don't have to be repeatedly skipped.  Unlike a general reference changing to a value like @mempty@, we know that a dead weak ref will stay dead.
*  Can I make do //entirely// with weak references, in place of returned unsubscribe actions?
*  Write a wonderful paper that shows the various options.
===
* Make sources that invisibly handle weak refs & finalization.  ++++
Maybe in the primitives, not the representation.  First try the primitives.  Returning to @mkNews@ above, go back to the simple new representation, and redefine @addL@ to manage weak references.
\begin{code}
addLW :: Monoid o => IORef (IO o) -> (IO o -> IO ())
addLW ref = \ cb -> do wcb <- mkWeakPtr cb Nothing
try <- tryWeak wcb
modifyRef ref (mappend try)

tryWeakTo :: o -> Weak o -> IO o
tryWeakTo dflt w = fmap (fromMaybe dflt) (deRefWeak w)

tryWeak :: Monoid o => Weak o -> IO o
tryWeak = tryWeakTo mempty
\end{code}
Simple!  Note that (a) @cb@ is no longer used after @mkWeakPtr@, and (b) the weak pointer is only passed into @tryWeak@.  Combine these two functions.
\begin{code}
weakenTo :: a -> a -> IO a
weakenTo dflt a = mkWeakPtr a Nothing >>= tryWeakTo dflt

weaken :: Monoid o => o -> IO o
weaken = weakenTo mempty

addLW ref = \ cb -> do cb' <- weaken cb
modifyRef ref (mappend cb')
\end{code}
Next note that @cb@ is used only in the first computation.  Rewrite to emphasize this property, use the @(>=>)@ operator.
\begin{code}
addLW ref = weaken >=> \ cb' -> modifyRef ref (mappend cb')
\end{code}
or simply
\begin{code}
addLW ref = weaken >=> modifyRef ref . flip mappend
\end{code}
It turns out that the only change necessary is to prepend @weaken >=>@.  Wow!
===
* Now events: +++
\begin{code}
-- | Handy function for making nontrivial events.  Takes a
-- |setHandler|'' function that (destructively) assigns a single sink to
-- be invoked upon some system event.  The subscribing sinks are
-- accumulated into a single, sequenced sink.  Specialized type:
--
-- @
--   mkEvent :: Sink (Sink a) -> IO (Event a)
-- @

mkEvent :: (Monoid (m ()), RefMonad m r) =>
((a -> m ()) -> m o) -> m (Cont (m ()) a)
mkEvent setHandler =
do  ref <- newRef mempty  -- accumulated sink
setHandler (\ a -> readRef ref >>= ($ a))
return $ Cont (modifyRef ref . flip mappend)
\end{code}
===
* Note: sources & news are handled so similarly, I may want to unify them, via @News o = Cont o ()@.
* I made these changes, including unifying events and "news".  I'm having a failure, apparently in mkEvent.  When I mappend two events, the earlier one gets lost.
* Keeping the result/output type generic in continuation/event functions is pretty handy.  Consider this example. ++++
\begin{code}
-- | 'Cont' value, over every monoid result type
type UCont a = forall o. Monoid o => Cont o a

-- Some nondeterministic values
ms, ns, ps :: UCont Int
ms = return 3 `mappend` return 4
ns = return 2 `mappend` return 5

ps = liftM2 (*) ms ns

-- We can run these 'UCont's by providing /any/ conversion to a monoid
-- value, e.g., IO () or [Int].

asList :: UCont a -> [a]
asList kk = runCont kk (:[])

asIO :: Show a => UCont a -> IO ()
asIO kk = runCont kk print
\end{code}
=== Try it out: ++++
{{{
*Main> asList ms
[3,4]
*Main> asIO ms
3
4
*Main> asList ps
[6,15,8,20]
*Main> asIO ps
6
15
8
20
*Main>
}}}
===
* The @ps@ example shows that @mappend@ is working correctly on these simple events.  What else could be going on?
* Converted my projects to use the {{{LANGUAGE}}} pragma where possible.  Some extensions aren't yet covered.
* Switched my {{{CHANGES}}} files over to markdown, for automatically generated html versions.
* Returning to yesterday's bug: @leftUp@ & @leftDown@ work separately, but when I @mappend@ them, the earlier one gets lost.  Here's a theory: the implementation builds two separate mouse events and filters them.  Perhaps the two can't coexist.  Yep, I'm pretty sure that's what's happening.  My @wEvent@ function side-effects the @on mouse@ handler.  The solution might be simple: add on to the current handler.  Yes!!  Augmenting the handler fixed the problem.
* Set up a wordpress blog for Holly: http://hollyc.com/hollosphere
* Thinking about how to preserve input state through composition.  Idea: during "rendering" to a GUI, build in the ability to make a new input or output of the same type, but holding all state.  Leave  open the possibility of a user editing everything about the UI (except type), e.g., slider bounds, title (change/add/remove), widget choice.  Idea: put a @Source (Input WGet a)@ into each @WGet a@ and a @Source (Output WGet WPut a)@ into each @WPut a@.  Note the type circularity.  I'll have to use newtype wrappers.
Type the text for 'New Tiddler'
* Some progress toward preserving input state across composition.  I separated out the initial value part of widget construction so it can be replaced on reconstruction.
* Changed my @WGetPut@ representations to use @newtype@.  Dropped all the other newtypes.  I'm about to add @CurInput@ and @CurOutput@ sources, which would have required more newtypes.
* I'm not sure about @Adorn@.  It's too powerful sometimes, stripping off my abstractions where I want them.  Wean off of for @WGetPut@.  Define some explicit conversions.  Or: remove the functional dependency and add an identity rule for @Adorn a a@.  Use fully typed.
* Defined a type of bijective arrows, as in [[There and Back Again: Arrows for Invertible Programming| http://www.citeulike.org/user/conal/article/1214009]].  Using it instead of @Adorn@ (defunct).  Give me better control over conversions and still fairly easy.
* Getting an internal compiler error when compiling @WGetPut@.  I'm trying the latest mingw32 snapshot (20070824).  Rebuilding packages.  Got to Phooey, which needs {{{wxHaskell}}}, which needs {{{time}}}, which is absent (apparently recently unbundled).  Got {{{time}}}, but it requires the cygwin {{{autoreconf}}} program.  Getting that & updating my cygwin packages.  I ran {{{autoreconf}}}, but still no {{{HsTimeConfig.h}}}, so I copied it from my previous, mostly working GHC installation.   Also copied {{{include/gmp.h}}}, as on [[2007-08-21]].  When continuing a compile, {{{rm out/wxc/{,ewxw/}*.d}}}.
* Phooey's {{{Monad}}} interface isn't compiling.  Maybe a change to the {{{forall}}} typing rules.  Taking this opportunity to redo Phooey, with a simpler monadic interface.  Removed {{{TagT}}},{{{LayoutT}}} and {{{Arrow}}} modules from {{{Graphics.UI.Phooey}}} and {{{Examples.Arrow}}} in the cabal file.  Remember to remove them from the darcs repo as well.
* I think I lose stretchiness on a @return@.  Oh!  My identity layout is @empty@, which is not stretchy.  Make it @fill empty@ instead and see what happens.
* Tweaked {{{WGetPut}}} to track sources of gets & puts.  Surprisingly easy.  However, I don't know what to do with the result.  It's the //TV// that gets transformed, not the output derived from it.  Hmm.  Maybe I could build up a source of inputs & outputs instead of gets & puts (rendered versions).  Yes, that worked.  I wonder if I could use @Pair@ & @Fun@ instances on @Input@ & @Output@.  Sure enough.  Maybe use those interfaces in place of @ipair@, @opair@, and @oLambda@.  Okay, now the input state of the argument-holding TV is remembered!  Next: the function-holding TV as well.
* I want to start building GHC HEAD from the source repo.  Grabbed the repo and started a build.  I got a build error.  The file {{{includes/GHCConstants.h}}} hadn't gotten filled in with correct values.  I cheated, by copying the file from another installation.
* I think I found the magic I need for compiling ghc: ++++
{{{
make distclean
sh boot
./configure --host=i386-unknown-mingw32 --with-gcc=c:/mingw/bin/gcc --with-ld=c:/mingw/bin/ld.exe --includedir=c:/cygwin/usr/include --with-gmp-includes
make
}}}
=== Now {{{includes/GHCConstants.h}}} gets created correctly and {{{gmp.h}}} shows up.  Ran into another problem.  The C compiler thinks there's an unterminated char constant in the following line in {{{ghc/rts/win32/seh_excn.h}}} ++++
{{{
#error Don't know what sort of Windows system this is
}}}
=== Next I noticed hundreds of split object files getting created.  I don't know where to set {{{SplitObjs = NO}}}, other than {{{mk/build.mk}}}, so I did the latter, enabling the "{{{quick test}}}" build.
* Consider some alternatives to @IO@ in Phooey & Eros that expresses and enforces safety.  In particular, an output is represented using a sink, i.e., @a -> IO ()@, which could do anything at all.  In fact, all it does do is tweak the state of a particular widget.  Consider (a) the @ST@ monad and (b) Wouter Swierstra's [[Data Types a la Carte| http://www.cs.nott.ac.uk/~wss/Publications/DataTypesALaCarte.pdf]] ideas.
* Composition idea: when I pick an output, not only highlight the type-compatible inputs, but also have those inputs temporarily take on the value of the chosen output, so that corresponding outputs preview the effect of the connection.  Alternatively, make that preview connection only when I hover over an input.  Here's a puzzle: what about inputs in the same TV as the chosen output.
* Imagine create fixpoint by feeding output into input in the same TV.
* I heard from Wouter Swierstra that he'll delay the next //The Monad Reader// longer, giving me time for the "Applicative Data-Driven Computation" article.  I want to write it up now anyway, as the libraries have improved and will improve more through writing.  Maybe I'd better take a fresh start, since the current version is very implementation-driven.
* Notes on a new "Applicative Data-Driven Computation" article: ++++
*  Simple model of "events": @type Event f a  = Time -> f a@.  Events are especially useful (as events) when they "occur" (have non-@mempty@ values) only discretely.  When @f a@ is a monoid, @Event f a@ is also, so we have the non-occurring event and event combination.  Examples: @Maybe@, @[]@ (other @MonadPlus@ types), @m o@ for monoid @o@.  It's also easy to define @Event f@ as a functor.  Another operation is filtering, based on @filterG :: f (Maybe a) -> f a@.  Here's a definition. ++++
\begin{code}
filterG :: (Monad f, Functor f, Monoid (f a)) => f (Maybe a) -> f a
filterG = join . fmap (maybe mempty return)
-- or, with help from ddarius
filterG = (>>= maybe mempty return)
\end{code}
===
*  Simple model of "sources" (called "behaviors" in Fran): @type Source a = Time -> a@.  Show the lifting trick.  Point out that the important thing is that @Source@ is an AF.
*  It's easy to see how a demand-driven implementation can work, which is fine for continuously-changing values.  When values change more occasionally, a data-driven implementation becomes much less taxing.
*  Simple idea: represent an event as a means of "subscribing" to it. ++++
\begin{code}
type Event a = (a -> IO ()) -> IO ()
\end{code}
=== A client provides a //listener// (or "callback") of type @a -> o@, which gets registered and later invoked on each occurrence.  Note that @Event@ looks a lot like @Cont@, which suggests a generalization. ...  What about the basic event operations?  @Cont@ is a monad and hence a functor.  The monoid operations come for free from the representation of @Cont@. (....)
*  Say why we do not use the Cont AF: it's not synchronous.  Cf @[]@ v @ZipList@ AFs.
*  What about sources?  Start with a demand-driven representation AF, which we'll refer to as a "sampler".  This time, instead of functions of time, we'll use another familiar AF: @IO@.  For instance, given @p, q :: IORef Int@, define @r = liftA2 (+) p q@.[footnote]  To get a value, @r@ gets values from @p@ and @q@ and adds the results.  Next, rather than frequent polling of @r@, we'd like to know exactly when it changes.  How about using an event?  If @c@ and @d@ are change events for @p@ and @q@, then the change event for @r@ is @c `mappend` d@.  Moreover, the change event for @fmap f p@ is @c@, and the change event for @pure a@ is @mempty@.   In fact, these rules look very like the following standard instances: ++++
\begin{code}
instance Functor ((,) u) where
fmap f (u,x) = (u, f x)

instance Monoid u => Applicative ((,) u) where
pure x             = (mempty, x)
(u, f) <*> (v, x)  = (u `mappend` v, f x)
\end{code}
=== How might we get the functor and AF operations to not only manage change events, but also perform the functor and monoid operations on our demand-driven representation?  By composition. ++++
\begin{code}
newtype (g O f) a = O { unO :: g (f a) }

instance ( Functor (g O f) , Applicative g, Applicative f)
=> Applicative (g O f) where
pure x            = O (pure (pure x))
O getf <*> O getx = O (liftA2 (<*>) getf getx)
\end{code}
=== We've only used the fact that @Event@ is a monoid and @IO@ is an AF, so our general notion of sources is this simple composition: ++++
\begin{code}
type SourceG change sampler = ((,) change) O sampler
\end{code}
===  When @change@ is a monoid and @sampler@ is an @AF@, @SourceG change sampler@ is an AF that simultaneously composes change //and// samplers.  As a common special case, ++++
\begin{code}
type Source = SourceG (Event ()) IO
\end{code}
=== As we'll see, however, most source operators work in a more general setting.
*  More examples.  Each example can introduce some event & function operators.
*  Applicative GUI programming
===
* Make the paper fun to read.  Introduce examples from the start, and motivate definitions from examples.  Ideas for examples: ++++
*  Buttons as unit-valued events.  Use @fmap@ and @mappend@ to combine into a single informative event.  Do something with menus also.
*  Filtering.
*  Accumulation.
===
* Come up with demos for Phooey, combining events & sources.  Maybe build the paper around the demos.  Ideas: counter (from [[Structuring Graphical Paradigms in TkGofer| http://www.citeulike.org/user/conal/article/1617415]]), calculator (adapting [[Lightweight GUIs for Functional Programming| http://www.citeulike.org/user/conal/article/1617412]]).
* Something went wrong with uploading my journal to the ZiddlyWiki server, some time over the summer.  Things like {{{\begin}}} turn into {{{\sbegin}}}.  What could have gone wrong?  For now, I've added http://conal.net/journal/index.html, which is a copy of my local journal.  Not a very pretty solution, but it works for now.    Write a note to the tiddlywiki group: +++>
I've been using ZiddlyWiki with the 2.0.11 core and am wondering about my upgrade options.  I haven't tracked this mailing list for months now, and I've lost track of the landscape.  My last memory is of some developments for demand-driven tiddler downloading (ajax-based, I suppose).  I was excited about that direction, as my TW is getting big.  Could someone summarize or point me to a summary of the status of server-based TW, including incremental/demand-driven downloading?
===
* Got a nice calculator demo program going.  It uses events more than sources, so I don't know how to cast it in the AF interface.  Think about it.  Came up with a lovely trick: use @mconcat@ on event-based UIs to merge their events and their appearance.  Do it twice for 2D layout: ++++
\begin{code}
-- Single calculator key
key :: Char -> UIE Char
key c = button c [ text := [c] ]

-- Row of keys.  Uses the Monoid instances for UI and Event
row :: [Char] -> UIE Char
row = fromLeft . mconcat . fmap key

-- Rows of keys.
rows :: [[Char]] -> UIE Char
rows = fromTop . mconcat . fmap row

-- The whole keyboard.  Four rows of four keys each
calcKeys :: UIE Char
calcKeys =  rows [ "123+"
, "456-"
, "789*"
, "C0=/" ]
\end{code}
===
* Got my journal switched over to the latest version of TiddlyWiki.  I'll try out [[MiniTiddlyServer| http://www.minitiddlyserver.com]] as a server-side solution.  It looks pretty good.  No load-on-demand yet (anywhere, afaict), but it does upload only changed tiddlers.
* [[MiniTiddlyServer| http://www.minitiddlyserver.com]] requires PHP 5, and Joseph's server has PHP 4.2.2.  Looking into  upgrading.
* Thinking about Phooey demos.  The calculator demo doesn't //really// need sources.  It could route the event right into the output.
* How to explain sources: start with events.  Give examples.  Then an example combining data from two events (shopping list).  The @Cont@ AF would combine all occurrences of each.  We want something that is to @Event@ as @ZipList@ is to @[]@: synchronous combination.  When one event occurs, combine the value with the most recent value of the other.  So, we need not only an event, but a way to retrieve the most recent occurrence value.  A "source" has these two parts.  Given the value retriever, the event part needn't carry any information.  For instance, @type Source a = (Event (), IO a)@.  (As with events, there's a more general notion.)
* Is there a way to avoid the IO-ness (ref-ness) of creating accumulating events & sources?  Consider @mkAccumE :: a -> Event (a -> a) -> IO (Event a)@.  I guess the reason there has to be an @IO@ is that the result really does depend on when it's called.  Invoking later makes an event in which fewer occurrences get accumulated.  Hm.  Do I really want this notion of event?  Or something that captures //all// occurrences, as in Fran?  Fran's notion was susceptible to time & space leaks, but only in the same way that lists are.
* Now I can do a {{{make}}} command in my {{{~/Haskell}}}, and the command will propagate to all of my projects (listed in the {{{Makefile}}}).  //Very// handy for clean recompilation, etc.
* Tweaked my emacs junk saver so that it adds a mode-specific comment with the origin file of the clip.
* The phrase "peace & justice" got my attention.  It occurs to me that the idea of "justice" presumes right & wrong and so is inherently at odds with peace.
* I could make a UI type parameterized by the value container @f@.  Then  ++++
\begin{code}
Functor      f => Functor (UI f)
Applicative  f => Applicative (UI f)
Monoid   (f a) => Monoid (UI f a)
\end{code}
=== Handy for @f = Event@ and @f = Source@.   Try it!
* How can I generalize the monadic interface to work with events & sources both?  For instance, I might want a text entry input as an event, to provoke an action when a value is entered.  Or maybe keep it a source and use a button event to snapshot.  Source inputs need initial values, while event inputs don't.
* In Eros outputs, maybe replace @Source O OI@ with @OI O Source@.  Close to the monadic Phooey interface, rather than the applicative interface.  Could I simplify my output-related type classes to have one constructor parameter instead of two?  For instance, ++++
\begin{code}
class Fun dom ran where
fun :: forall a b. dom a -> ran b -> ran (a -> b)

class Fun dom where
fun :: forall a b o. dom a -> (src b -> o) -> (src (a -> b) -> o)
\end{code}
=== I don't think so.
* Note to Wouter Swierstra: +++>
After getting started, my inclination is to release a series of blog posts instead of a single article.  Maybe after the blog posts, I'll better see an article-sized unity, but maybe not.  So I'd suggest going ahead without me.

In addition to the beauty of the general idea in your "a la carte" paper, I got very excited about your application to separating out IO into many pieces and thus giving much more precise information about what kind of effects a computation can do and what it cannot.  I wholeheartedly agree with your "sin bin" comment and often wish the monadic approach to IO hadn't been discovered, to leave more incentive for a solution that is semantically tractable as well as practically convenient.  My hunch is that solution is still waiting to be discovered, but not many folks are looking.

Example: IO-based GUI programming.  I'd like to enforce in the types that some of the IOs can only write to widget state, and moreover only particular widgets.  In the applicative functor Phooey interface, I formulate outputs as having type
\begin{code}
type OWidget a = UI (a -> IO ())
\end{code}
The idea is that, when rendered, the output UI makes its visible widget and gives a way to write to it (the a -> IO ()).  Then connect an input widget uiA and an output widget uiO, by "uiO <*> uiA" to get the runnable UI (IO ()).  (Hidden is my type of time-varying values with data-driven computation.)  So this very harmless UI has a very scary type.
===
* Ideas on accumulating events: ++++
My event examples are using event //accumulation//, which works via  ++++
\begin{code}
mkAccumE :: a -> Event (a -> a) -> IO (Event a)
\end{code}
=== I could purely accumulate the @a -> a@, and even do so very elegantly as the endomorphism monoid (@Endo@), so as not to have to specify the identity (@mempty@) and composition (@mappend@).  Whenever the endomorphism changes, it would get applied to the initial value, which would be frightfully expensive.  In a sense, the approach above exploits associativity of composition for efficiency.

Mitch Wand used this associativity trick very effectively in his paper "Continuation-Based Program Transformation Strategies".  He also came up with alternative (data) representations for the continuations.  Oh!  I could do that here.

For instance, I have an example with a pair of buttons, one of which increments and the other decrements.  I @mappend@ the events into a single @Int -> Int@ event and then use @mkAccumE@ (buried in a UI function that absorbs the @IO@).   ++++
\begin{code}
upDown :: Num a => UIE (a -> a)
upDown = op "up" (+1) `mappend` op "down" (subtract 1)
where
op str f = (fmap.fmap) (const f) (smallButton str)

counter :: UI ()
counter = title "Counter" $fromLeft$
do -- Apply each increment/decrement cumulatively
val <- 0 `accumS` upDown
showDisplay val
\end{code}
===
Using Mitch's insight, I note that each of those continuations is adding a number, and so could be represented by a number.  Using the @Sum@ monoid instead of @Endo@ results in an efficient, evaluated representation of that continuation, namely a single number to be added. ++++
\begin{code}
upDown :: forall a. Num a => UIE (Sum a)
upDown = op "up" 1 `mappend` op "down" (-1)
where
op :: String -> a -> UIE (Sum a)
op str n = (fmap.fmap) (const (Sum n)) (smallButton str)

counter :: UI ()
counter = title "Counter" $fromLeft$
(fmap.fmap) getSum (monoidS upDown) >>= showDisplay
\end{code}
===   Works great.

The next example is the calculator.  The transition function: ++++
\begin{code}
cmd :: Char -> (CState -> CState)
cmd 'C' _                 = startCS
cmd '=' (d,k)             = (k d, const (k d))
cmd  c  (d,k) | isDigit c = (10*d + ord c - ord '0', k)
| otherwise = (0, op c (k d))
\end{code}
===
What is a data representation of the subset of state endomorphisms created from compositions of @cmd c@?
===
* Tried out {{{haddock.ghc}}}, running without {{{cabal}}}'s help.  Got two errors, and asked David Waern for help.
* Eros composition now remembers the full input state from both composed TVs!  Turned out to be quite simple.  Discovered I was losing my labels during composition.  Fixed.
* Wrote a reply and a post on [[NVC Evolves| http://evolve.awakeningcompassion.com]].
* Blog notes: ++++
I've been noodling about latency in the context of our living-in-the-sticks satellite internet service, compared with our deliriously responsive Seattle cable-based internet access.  Holly told me about the Vudu, which gives you instant access to 5000 movies.  The trick is in the solution to latency, as a complement to download-on-demand.  The Vudu box contains the first 30 seconds of all those movies.  If you start watching one of them, the box will start downloading the rest.  In other words, the box //pre-buffers//, to eliminate the buffering latency.  From there, possibilities abound.  For instance, suppose you start watching a TV series that's not pre-buffered.  Once you watch most of one episode, the software can guess that you'd probably like the following episode pre-buffered.  Or much more sophisticated collaborative filtering.  Instead of just recommending a video, based on statistical patterns, pre-buffer it as well.

Now, where else can we apply this pre-buffering trick?  How about web pages?  And links within those pages.  Especially if the browser (extension) can make good guesses at what you're likely to click on.  For instance, use the currently visible portion of the page, plus a little past.  Even track the mouse motion.  Or use the idea in [[TiddlyWiki| http://tiddlywiki.com]], which encourages and supports microcontent, where there are many fewer choices of links.  Another TiddlyWiki use is pre-buffering lists of microcontent names and their tags ("tiddler metadata").

Then there's browser cache management.  Instead of the current choice of keeping a whole page or purging it, pare the cached content down to just a pre-buffer.
===
* Write about the dual monoids in the event algebra (events & listeners).  Lovely!
* Tweaks to haddock.ghc to compile with ghc-6.7 20070824: ++++
*  Still breaking.  Plus one more windows-specific problem.  And now my ghc snapshot dies when linking haddock.  Someday Linux.
===
* Yesterday Holly & I had a pow-wow about improving the user experience for Eros, and today I'm figuring out how to do it. ++++
*  Find new terms to replace "input" and "output", since an output of one TV is used "as input" for another.  Along the same lines, what's input (output) to an interactive program is output (input) to the user.
*  Come up with simple & compelling uses of higher-order composition, i.e., filling in a function-valued input with a function-valued output.
*  Pick the output first (reversing current style).  Compatible inputs highlight.
*  After picking an output, hovering over a compatible input causes the output value to override that input, thus giving a preview of the result of composition.  Moving the mouse out of the input area ends the override.
*  Allow a sort of "temporary" connection to be made, without combining TVs.  In other words, separate out "connect" from "merge".
*  How to visually denote unmerged connections?  Some possibilities: ++++
*   Visual pathways between TV inputs & outputs.  I don't know how to get graphics into the mix with MDI child frames.
*   "Docking", i.e., immediate proximity.  I don't know how to handle crowding when there are several adjacent small inputs each connected to large outputs.  Or an input with several connected outputs.
*   Bust out of 2D GUI space and use more freeform 2D or 3D graphics.  Show pathways, preferably animated to suggest flow.
*   Matching patterns of color & rhythm.  Remember that there can be several simultaneous connections.  I can see how this approach could work.
===
*  If I pick the output first, how do I deal with "tweaks" (dup, swap, curry, etc)?  Currently, I pick a tweak, and then compatible outputs highlight for choosing.  Kind of cool, but not really user-friendly.  After all, I don't expect the user to just have a hankering to curry something, but rather to have a particular output to curry.  So, a more fitting interface would be to select the output and then see what tweaks are compatible.  Oh!: right-click to pop up a context menu, populated with just the applicable tweaks.  (In wxHaskell, use @windowOnContextMenu@ from @WXCore@.  I haven't found a higher level @WX@ version.)  A context menu will have "connect" followed by the type-compatible tweakers (usually very few).
*  For now I'm reworking the @WPut@ representation (for outputs) so that the source being shown is available for other uses.  In particular, to connect to other inputs for preview or temporary connection.  The redesign is tricky, as currently no information passes between input & output of a lambda GUI.  Instead, the output passes back a source of sinks and the lambda stitches the source & sink together.  I'm playing with a new design in which rendering an output (into a @WPut@) passes in the source that gets output.  Which means that lambda (@fun@) has to work @Monad@-ically, rather than @Applicative@-ly.  Moreover, I'm not yet seeing how to keep the very simple, regular and automatic (via type classes) way that @fun@ now works.
*  Another issue: I think I'll need to create a source of sources and then collapse into a source.  The source of sources would come from selecting an output source to connect.  Or it could be that choosing an output for connection would yield an source-valued event.  Still that event would be turned into a source.  That operation is the @switcher@ combinator from Fran.  I don't think I have now a reasonably efficient way to implement @switcher@, because listeners for no-longer-relevant sources would hang around until they get GCd.  Maybe use the idea of having @subscribe@ return an @unsubscribe@ action ([[2007-08-29]]), instead of or in addition to weak references.  Perhaps @unsubscribe@ could simply invoke @finalize@ on the listener weak ref.  Give a go at the @unsubscribe@-style events.  But first, try switcher with the current representation.
*  Hey!  @switcher@ can be defined via @stepper@ and @join@ (rather than @stepper@ via @switcher@).  I didn't know that.  So, @Source@ as a monad is useful after all.  What about events?  Would @join :: Event (Event a) -> Event a@ do what I want?  Not with the @Cont@ monad, which remembers //all// occurrences of the outer event.  The occurrences of @join ee@ are all occurrences of all events that come from @ee@.  Hm.  Oh, hey again!  That semantics for @join@ may be exactly what I want in place of my imperative @eventX@ function for creating extensible events.  Instead of merging events in via an explicit sink, have an event-event.  Specifically, there's a @newTV@ event that comes from choosing a TV from the "Parts" menu or doing a composition.  But composition also generates opportunities for more @newTV@ occurrences.  So, massage @newTV@ so that each occurrence contains another @newTV@ event, and @join@ the result.  Now consider a source of sources.  I could simply @join@ the underlying change event and @join@ the @getter@.  However, I think I'd get something that has lots of false change reports, based on changes to no-longer-relevant sources.  I guess a solution is to define a @join@ variation that stops paying attention to occurrences of old events.  Give it a try.
*  Oh -- all I need is a function @once :: Event a -> Event a@ that yields an event whose only occurrence is the first occurrence of the given event.  Then I can say @join (once ee)@.  Okay, how to implement @once@?
*  Hm: see if I can easily define monad instances for various @O@-style compositions, if I think of @join@ instead of @(>>=)@.  Look at @Source@ and then (monadic) @UI@.
===
* Emacs idea: have mmm parsing happen automatically in the background.  Learn how the background-processing trick works.  See haskell-mode.
* We're very close to the edge of our 7-day satellite bandwidth limit, so we're taking it easy with our internet use today.  To do: ++++
*  Copy {{{papers/ppdp00/default.htm}}}.  Had a bogus bibtex entry. [done]
*  Look for an Emacs mode for mediawiki markup.
===
* Here's an angle on @WGetPut@ to get access to an output's source, while retaining the elegant automatic composition that happens now.  Have the @WPut@ work as now and pass back a function from source that installs a "connect" handler (or the whole context-menu handler).
* Think about today for Eros: ++++
*  HOF example.  Could do @liftA@ & @liftA2@ for regions.  Build intersect & union.
*  Editing titles & bounds.
*  Replace @eventX@ with @join@.
===
* I want to play with @join@/@(>>=)@ on events.  Examples: ++++
*  Asteroids:  each collision can break an asteroid into more of them, each of which has to be tracked for more collisions.
*  A chat room has an "enter" event, whose occurrences contain new events like "speak".
*  In Eros, track operations on TVs.  Compositions yield more TVs to track.
===
* More thoughts on the data-driven build (see [[2007-08-24]] and pointers from there): ++++
Functional programming (FP) supports composition well by making data dependencies explicit.  (See John Backus's Turing Award lecture.)  While this property applies to FP's "programming model", it does not apply to the model of programs (source code), in which (syntactic) data dependencies are implicit as with other styles of programming language.  For that reason, source code dependencies have to be explicitly (and imperfectly) analyzed or left unknown.  This incomplete information leads to two problems in the build process: over-compilation and under-compilation, due to over-estimation or under-estimation of dependencies.  (Examples: {{{make}}} rules with missing or extraneous dependencies; use of @import@ or {{{#include}}} triggering recompilation after irrelevant changes.  Others?)

Here's a simple solution: apply the functional model to the manipulation and application of source code.  Make the dependencies totally explicit and purely functional.  Use a simple, general tool for data-driven evaluation of varying values connected by pure functions, such as [[DataDriven]].   Every form of dependency becomes an explicit parameter.  For instance, replace {{{#include foo.h}}} or @import Foo@ with a formal parameter (a lambda) //in// the module source code and an application outside of the source code (in something like a "make" file).  The parameter/lambda has a type like {{{module}}} rather than one like {{{String}}}.  In a CPP-like setting, this application of parameterized code to argument code simply does the job of CPP.  With just a bit more sophistication, one could easily support "pre-compiled headers" (PCHs).  Each PCH is defined as a particular {{{precompile}}} function applied the header source, and then code source is defined as a function applied to some number of PCHs.

This idea applies not only to source code but to object code as well.  The linker is a function that takes object code chunks as arguments and produces executable code as output.  For convenience, there's also the archive function ({{{ar}}}), which combines several pieces (usually object code) into one.  (I'm getting beyond my knowledge here.)

A Haskell module compiles into two results: object code and an "interface".  Importing a module means applying to the interface part, while linking applies to the object code part.  An "interface" is a dictionary mapping names to types and some other information.  When the @import@ line lists a subset of the exported names, then the actual dictionary is the result of applying a filtering (subsetting) function to the export dictionary.  When the import is "qualified", the dictionary is further transformed.  These transformations can be described as very simple pure functions applied to a functional dictionary type (@Data.Map@, using @filterWithKey@ and @mapKeys@).

Given such explicit and precise knowledge, it's easy for a data-driven evaluation framework to do better than conventional tools.  For instance, if I change a comment in a header, the PCH won't change, so the source module depending on the PCH won't get recompiled.  Similarly for an interface @import@.

Code processing may be chained.  For instance, a grammar might get compiled into C code that then gets compiled into object code and from there linked into an executable.  With FP, such chains are written as nested applications or directly as function compositions.

Let's call the previous example "first-order chaining".  There's also a "higher-order" version: the grammar compiler itself is computed by applying {{{compile}}} (& link) to the its own source code, which itself comes from applying parameterized source code to headers (possibly precompiled).  When the grammar compiler's source code or headers change, it must be recompiled, and the resulting function applied to the grammar (and the result recompiled, etc).  Higher-order chaining is directly and powerfully supported in FP as higher-order functions.

Given that both a function and its argument can change, the applicative functor (AF) interface is exactly the right abstraction for tool building.  AFs have two operations in common: one, @pure x@ promotes a pure value to an AF value; and the other, @h <*> a@, applies an AF-style function to an AF-style argument.  You may not have heard of AFs before, but if you know some monads, you already know AFs also, as every monad is an AF (with @pure@ and @<*>@ corresponding to @return@ and @ap@).  As a common case, in place of @pure f <*> a@ for a pure function applied to an AF, there are the equivalent short-hands @f <$> a@ and @fmap f a@. I'll refer to the particular AF for varying values as @Source@. So far I've avoided talking about //files//, which are at the center of current source processing. Files, and particularly reading and writing them, don't have much of a place in a functional paradigm. Their roles would be (a) interfacing to the world outside of our abstraction, and (b) persistence of defined sources for later use. There are two halves of interfacing to the outside world: import & export. Importing a mutable input file means wrapping it up as a source, e.g., with help from a facility like [[hinotify - Haskell binding to inotify| http://www.nabble.com/ANNOUNCE:-hinotify-0.1-t2384572.html]]. Exporting a source creates two results. One is a new file, and the other is an active thread that keeps the file up to date with the source's changing value. How often does the implementation recompute? Pure data-driven or demand-driven or something else? With {{{make}}}, one explcitly says to when update ({{{"make"}}}) and over what set of inputs, outputs and dependencies (choice of make file & target). GUIs toolkits are mostly data-driven, but not fully. They handle some events differently from others. Mouse position is a frequently changing input, so typically motion events are filtered so that an application's event-processing code sees only some of the motions. 
This special-case hack addresses part of a general problem of assigning finite processing resources to diverse and rapidly changing input. I'd like to generalize the hack to a more widely applicable solution. Misc to write about: Connections with [[nix| http://nix.cs.uu.nl]] and with ML functors. Also, incremental/adaptive computation. === * Wow, what a great idea: [[Mirrorboard: A one-handed keyboard layout for the lazy| http://blag.xkcd.com/2007/08/14/mirrorboard-a-one-handed-keyboard-layout-for-the-lazy]]. Type one-handed on only one half of the keyboard. Use caps-lock to mirror the key's interpretation, thus simulating the other hand. The writer also suggests of using smart software to guess the mirrorings, instead of caps-lock. I love that idea. Tons of comments at the blog post. * Hm -- suppose an event has no remaining live listeners. ... * Progress with getting haddock.ghc to compile. Seems to be a ghc bug in writing a temporary .rc file with single backslashes in path names. I worked around the bug for now and have a new haddock.exe. Running into more problems with it. Asked David Waern for help. * Tried replacing @ReaderT@ and @WriterT@ with @Monad@ instances for the corresponding type compositions @((->) r O m)@ and @(m O (,) w)@. Doesn't work because these two patterns overlap, and the latter overlaps with other stuff. * Apparent GHC install bug: my {{{package.conf}}} file lists {{{/usr/local/doc}}} as the documentation root instead of the actual installation location ({{{c:/ghc/ghc-6.8.20070909/doc/}}}). I worked around the problem with a symlink. * Updated the wiki pages for [[TypeCompose| http://haskell.org/haskellwiki/TypeCompose]], [[DataDriven| http://haskell.org/haskellwiki/DataDriven]], and [[Phooey| http://haskell.org/haskellwiki/Phooey]]. * Eros ++++ * Now all outputs highlight (even sliders & checkboxes) and do so consistently as a thin frame around the actual control. 
* Puzzling over preview compositions: ++++ * The effect: temporarily show the effect a composition would have. Pick an input and move the cursor over a compatible output. (The order of composition choices may change.) Before committing, the input starts taking its values from the output. When the cursor moves out of the output area, the temporary connection is broken, and input goes back to normal. * Why tricky: an output doesn't currently have access to the source being displayed. Instead, it returns a source of sinks. This separation lets me use AF-style, rather than monadic, composition. Then type composition makes my AFs into AFs. * Idea: when I enter the output space, a simple event occurs at that level, delivering a function from the (unknown) source. The containing lambda transforms that event by applying the (now known) function to the source. * Along with the existing @CurrTrans@, there will be a new item coming down each @WGet@ and @WPut@, namely the "source of interest". It will be type-wrapped, just like @CurrTrans@. * Oh, yeah! Now I remember the puzzle of how to track this source with reasonable efficiency. The challenge is that I'll have to switch from one source to another. I want to make sure listeners on old source change events get dropped when I switch to new sources (@join@ plus efficient @onceE@). * Punt ephemeral? * Oh! Here's a simpler way to preview: actually do the composition and make a whole new TV. The TV only "sticks" if committed, and the parents are removed only in that case. Otherwise, when the cursor moves out of the counterpart's space, the composition disappears. Make sure the temporary TV is out of the way of the parents. Try this approach first, as it's probably a much smaller impact on the current implementation. For simplicity, don't worry about making the composition only once. If easier, allow the transient composition to disappear on counterpart exit or connect and then make a new one in the current way. 
* Working on this last idea. Coming along fine. Interesting pattern: begin a composition, and either commit or abort/undo. Sounds kind of like a transaction. Define: ++++ \begin{code} type TransactE' fa = Event ((Event (), Event ()), fa) -- commit, abort, value type TransactE f = Event O ((,) (Event (), Event ()) O f) \end{code} === Or replace the pair of events with a single event that describes how the transaction ends: ++++ \begin{code} data End = Abort | Commit type TransactE' fa = Event (Event End, fa) type TransactE f = Event O ((,) End O f) \end{code} === Yeah. That one. === * As a teaser, highlight compatible outputs while the cursor is in an input, even without clicking. The input choice only "sticks" when clicked. * Give hints in the status bar about what the user might do. Maybe have a settable chattiness level. * Imagine Eros in a shared space with lots of people grabbing & connecting. === * Updated [[TV wiki page| http://haskell.org/haskellwiki/TV]]. I'm increasingly drawn to the idea of describing TV (and Eros) as "composable MVC". And to using the terms "controller" and "view" instead of "input" and "output". Those terms would give programmers a familiar starting point. I could say how TV (renamed) is like MVC and how it's unconventional. * Eros: ++++ * Current transformation (@currXf@, maybe-valued): ++++ * Tracks the chosen transformation. * Offer a new transformation (@Just@) when the @newXf@ occurs. * Retract the offer (@Nothing@) when it ends (abort or commit). === * New TV: ++++ * Compose TVs or choose from parts list * Close new TV if abort before commit. How to handle the "before" part? Use @onceE@ to make sure //end// events can only occur once. * Close parent TVs if commit before abort. === * Puzzling over events with unsubscription: ++++ * The approach I like is to use @Cont@, an return @m (m o)@ instead of @m o@, particularly @IO (IO ())@. The @subscribe@ returns an @unsubscribe@ action. 
++++ \begin{code} type EventUndo m o a = Cont (m (m o)) a type EventUndo' m o a = (a -> m (m o)) -> m (m o) \end{code} === * Event merging (@mappend@) combines both @subscribe@ and @unsubscribe@ actions. Cool! * A listener has type @a -> m (m o)@. For what purpose can the return action (@m o@) be used? My first guess is an "undo", as with subscribe. I like that. Give it a try. * Perhaps what I'm exploring here is a monad of reversible computations. * Suppose I have a stateful container type whose @add@ operation yielded a @remove@ action to be invoked later. Such a container could form the basis for @mkEvent@. (Oh, oops -- in that case, I think I'll have to make @mkEvent@ an IO again.) I could use a map with a next-free-index. Is there a simpler way? For every listener added, make a new listener-valued ref whose value starts out as the listener. The listener @mappend@-ed is @join (readRef lref)@. The retured unsubscribe action sets the @lref@ to @mempty@. The downside is that the dead listeners accumulate over time. Start simple. * Here's an idea for taking @mkAccumE@ out of @IO@. Do a @join@/@(>>=)@ thing in which each occurrence gets passed into construction of the remainder: ++++ \begin{code} accumE :: a -> EventU (a->a) -> EventU a a accumE e = do f <- once e let b = f a return b mappend (b accumE e) \end{code} === This version waits for the first real occurrence. Or we could yield @a@ immediately: ++++ \begin{code} a accumE e = return a mappend do f <- once e a accumE e \end{code} === The general type: ++++ \begin{code} accumE :: (Monoid (m (m o)), MonadFix m) => a -> Cont (m (m o)) (a -> a) -> Cont (m (m o)) a \end{code} === Now I think the only @IO@ operation remaining is @refEvent@. It could take a ref argument, and so put off the @IO@ to the caller, which might absorb it. Or just don't use @refEvent@. === * Eliminated a source of @IO@ in Event: now @mkEvent@ has a pure type. Several other interfaces followed. I still have @IO@ in @mkAccumE@. 
That one is probably necessary semantically, since the result of accumulation depends on when the accumulating starts. * My Gaim got into a weird state in which it uses "b" to toggle logging. I got an answer on {{{#pigin}}}: hover over the menu entry and hit {{{<backspace>}}} to remove the binding. Or hit another key combo to add/change the binding. I think it's a GTK thing. * Meanwhile, I started using the emacs-based IRC client {{{erc}}}, which is awesome. I'm not going back. * Eros: ++++ * I've been wondering how to make and what to do with unsubscribe-friendly listeners (continuations). They're there for consistency with unsubscribe-friendly subscribers. With @join@/@(>>=)@, listeners and subscribers come together: the listener creates an event (@Cont@) and subscribes to it. So now I think I understand the point of the unsubscribers/undoers returned in listeners: stopping any ongoing effects/listeners. In essence, for killing spawned threads. Given a subscribing listener @l@, a primitive event @e@ must gather the unsubscribers returned from the listener for each of its occurrences. The unsubscriber returned from @subscribe e l@ must somehow execute those unsubscribers, as well as disconnecting @l@ from @e@. Where is this mass unsubscribing useful? With @e >>= f@. Given a listener @l@, each occurrence @a@ of @e@ leads to a new event @f a@, to which @l@ is automatically subscribed. Now @l@ loses interest in @e >>= f@ after several such occurrences. It will have to be removed from all of these events. * Maybe this business can be done much more simply, along the lines of ephemeral listeners. When I want a temporary listener, for whatever reason, wrap it in a way that it can be removed by remote control from all events that get ahold of it. For instance, wrap in a maybe-valued ref that must be indirected through. Since listeners are always in a monoid, I could have the ref contain just a listener. 
Or, hide the ref in a new listener with exactly the same interface as the original listener. This trick is what I already do in my event-building primitive, before going to explicit unsubscribe: ++++ \begin{code} mkEvent :: (WeakM m, Monoid (m o)) => (Unop (a -> m o) -> m o) -> Cont (m o) a mkEvent modifySink = Cont (ephemeral >=> modifySink . flip mappend) \end{code} === A benefit and drawback of this @mkEvent@ is that the listener ephemerality is completely hidden. Listener tombstoning happens only via GC, not on demand. * When do I want to dictate listener removal? ++++ * The listener writes to state in a subwindow that gets closed. In wxHaskell, we can get a crash. (My hack solution is to hide the subwindow, which avoids crashing but defeats ephemerality.) * An explicit, semantic operation like @once@ that yields a truncated event. === * Consider @fmap@ -- laziness :). Also @fmap@ and repeated composition for multiple listeners :(. * Back to the Eros @TopLevel@. ++++ * Cursor enters input space (first, for now), triggering @currXf@ (current transformation). If an abort (cursor exits input space), cancel @currXf@. Commit makes @currXf@ sticky, i.e., ignore abort. * Cursor enters output space, triggering @newTV@. If abort, close the new TV. If commit, close the parents. In this case, I want to know whether abort or commit. === * I really do want to switch the order of selection from input ("controller") first to output ("view") first. Each view has a type-specific menu with type-applicable DeepArrow operations ("tweaks") and "connect". If a tweak is chosen, yield the tweak in the @Transforming@ event. If "connect", contribute to another event that carries a continuation from a type-compatible transformation. * Oops -- I just discovered that the filtered mouse events are write-only. I count on read/write of handlers so that I don't have to make a new ref in the process of wrapping a wxHaskell event as a DataDriven event, which saves me an @IO@ in the type. 
Even with ref & IO, I don't know how to do it. Okay, I think the ticket is to wrap the //unfiltered// mouse event. Oh -- I have already wrapped enter, leave, and rightDown. === * I think my approach to events has a serious flaw, reminiscent of Fran. An event is stateless, so nothing happens until subscribing. Here's how event accumulation works: ++++ \begin{code} accumE, accumE' :: a -> Event (a -> a) -> Event a a accumE e = do f <- once e f a accumE' e a accumE' e = pure a mappend (a accumE e) \end{code} === Nice & simple. And works fine when the argument event is non-accumulating. However, feed an accumulation event into @accumE@, and the accumulating starts anew at each @once e@. * Reworked lots of events & sources stuff accordingly. My examples aren't as pleasant. One helpful trick was to write the monadic operations so that they automatically lift to containing monads. ++++ \begin{code} class Lifts m m' where lifts :: m a -> m' a instance Lifts m m where lifts = id instance (MonadTrans t, Lifts m m', Monad m') => Lifts m (t m') where lifts = lift . lifts \end{code} === I think @Arrow@ really //is// the way to go for interactive stuff, since it tightly controls the flow of information in & through components. Just as Yampa hides "signals" and reveals an arrow of signal transformers, I could hide the stateful stuff. Hm. Full circle: Phooey started as an arrow. To do: consider functional interaction all over again. * Now I have a type-specific context menu for outputs/views. It offers the type-specific tweakers plus "compose". The latter isn't yet hooked up. Now I know how: Instead of passing a current transformation (@UFun@) down in output rendering, pass //up// an event whose value is a continuation on transformations: @Event (Sink UFun)@. At the top level, turn that event into a @Source (Maybe (Sink UFun))@ and pass it back down through outputs into inputs. Highlight compatible inputs just as I now show compatible outputs. 
When one is selected, the continuation is invoked. Some questions & possibilities: ++++ * What's in the continuation? Note input state sampling and child frame stuff. Maybe a delimited continuation producing a new output. * Maybe add in the two frames for closing. * Go both ways: input first or output first. * Here's a weird idea: transform the tweaks on the way down. Is @Uarr@ a deep arrow? It doesn't have the right kind. * Pass down the source also on the way down for preview override when in a compatible input. === * Hunch: output-first is difficult because of my choice of deep application. There are two of them. If I recall the other one and go for it, it will work out easily. Then I can support //both// orders usefully, with //different// results. * The trick: reverse roles between function and argument. Given a selected argument (output), deeply apply @flip ($)@ to get an inner //forall b. (a -> b) -> b// and extract the function. (Note that @flip ($)@ is @pure@ in @Cont@.) The result is a new transformation that targets functions. * It might be useful to have support for function transformations more than just as transformations that apply to functions. If a function transformation applies to a function of type //a -> b//, then a function transformation could be directed at a buried input of type //a//, and it would automatically extract the input and then apply the function transformation. Examples: curry, curry' (//flip . curry//) and argument swap. I don't really see how uncurry and flip could fit in, since they work on uncurried functions, and hence more than one input. * Could I define wxHaskell attributes at @Pair@ types? * Trying to get parent retrieval working in Eros. Hung up on keyboard events. I get them in my widgets but not the overall panel or frame. Asked for help. * Implemented hierarchical menus. * Reading the "Stream Fusion" paper. 
I like this phrase: "Our pipeline of list transformers has now been transformed into a pipeline of stream transformers" (end of 2.2). I've been thinking for a while about a representation for infinite continuous images that exploits the caching behavior of lazy data evaluation (unlike the function representation). The representation would use zippers and bi-infinite quad trees, with zippered bi-infinite sequences of approximations. I suspect it will be important to fuse some compositions to eliminate the intermediate representations. * List of Haskell [[language extensions| http://hackage.haskell.org/cgi-bin/haskell-prime/trac.cgi/wiki/HaskellExtensions]] * Add temporary connections. ++++ * Interface: ++++ * In an output context menu, select "start temporary connection". * In an input, select "finish temporary connection". Maybe even while hovering. * To cancel, choose "cancel temporary connection" in the input. * Maybe even "fuse temporary connection". === * What do I need to implement this idea? ++++ * Access to the //source// being shown in an output. * Could pass the source down. May complicate the wiring. * In output rendering, I currently pass up a source of sinks. Instead pass up a function from sources to action sources. Then yield an event of type-wrapped value sources, in addition to an action source. Pass a source of type-wrapped sources down output rendering into input rendering. === * I got this approach working. One glitch: it crashes on filters -- I think due to infinite regress, i.e., stuck in a source updating loop. How to avoid this loop: ++++ * Allow the regress but put some kind of limit on it. How could that work? * Try not to let an output influence an input in the same TV. But how? I pass up these "new source" events, combine them all into one and pass it back down. * Hold off on combining the "new source" events. Instead, pass them down uncombined, to be combined selectively in each TV. Give each TV a unique identifier. 
Throw that id into the mix. === === * Notes from "Effective Oral Presentations": ++++ * One main message only. A single sentence for listeners to remember. Expressed early in the presentation and chosen early in designing the talk. Focuses the presentation. * Parts: opening (motivation & main message), body (selected evidence), closing (restate main message). * Opening: Attention getter, need, task, main message, preview * Visuals (slides) & spoken text: each conveys the message, wholly and differently. Minimize text in visuals so as not to compete with speaking for verbal attention. "Do more with less: give meaning to all visual elements used, eliminate all visual noise." * One short message sentence (with verb) per slide, in lieu of title. Other text: keywords/labels to allow the visual to stand alone. Planning: choose the messages, then how to support with visual. * Minimize visual noise (fonts, sizes, colors). Develop in black&white. Then add color sparingly for emphasis or identification. * Face & address the audience the whole time: feet, hips, shoulders. Constant eye contact. "Establish a genuine relationship". === * I wonder: ++++ * How to demonstrate the software //and// keep connection with audience? === * In Seattle for Tasha's wedding. Leaving for Germany on Monday. * Thoughts about my ICFP talk: ++++ * What's my one message? Ideas: ++++ * Functional programming can be artist-friendly. * Programming can be made much more accessible by mapping parameterization onto interaction. * Media authoring (2D, 3D, video, interaction) is more accessible than programming, especially for right-brain creative folks. Programming is much more expressive. I'm going to show you a way to combine the strengths of each, by mapping typed functional values into interactive, composable GUIs. * Programming is a powerful medium of expression, but largely inaccessible to right-brain creative folks. 
For //functional// programming in particular, I'm going to show you a way to bridge the gap, by mapping typed functional values (including functions) into interactive, composable GUIs. === === * At IFL in Freiburg. * At IFL. * Showed Eros to Colin R and got some comments. Highlights: ++++ * Use a canonical presentation per type, for concreteness. Same input & output. Particularly images! Change my filter examples to have a visual input, even if it's not modifiable. [Done. Much nicer!] * Think about naming. Ideas: Many-to-many relation between names & things. Name with pictures. * Do some thought experiments about fixpoints. Colin has some ideas & experience from a class experience: interactive mosaic construction. Come up with some nice fixpoint examples. === * [[MGS| http://mgs.lami.univ-evry.fr]] looks very neat. Amorphous computing. Nice language ideas. "Transformations of topological collections". From home page: ++++> The goal is to investigate the concepts and tools (programming languages) needed for the modelling and the simulation of dynamical systems exhibiting a dynamical structure. The results are validated by the design and development of an experimental declarative programming language and of some applications (mainly in the area of biological systems modelling). Our approach is based on the use of topological notions to extend the idea of rewriting systems. And we want to have fun! === Also see the [[publications page| http://mgs.lami.univ-evry.fr/PUBLICATIONS/publication.html#Documentation]] * Working image input -- visual, but not directly editable. I supply an initial value, which displays in the input widget. When connecting temporarily, update the received input dynamically. Do in general, not just with images. * John O'Donnell's talk "Circuit Parallelism in Haskell Programs". Very inspiring! Generate FPGA code that runs with massive parallelism. In particular, create replicated units, each of which has its own local memory & processing. 
Example: interval selection sort (name?). Each cell (array entry) has a value (unchanging) and a gradually refined interval of indices, saying where the cell is to end up. Grab a pivot value. Count the number of values less than the pivot (log n). Refine the index interval (constant time). Other operations can sometimes be done without fully sorting: what's the value at index i, and what's the index of value v. * Idea for presentations: point a laser at the screen and get the mouse to move accordingly. Trick: have a gyro, a calibration step, and a way to tip it back into sync as you go. Type the text for 'New Tiddler' * Second day of ICFP. My Eros talk was well-received yesterday. Last night I finally slept well. * Eros thoughts: ++++ * Play with some more deep arrow instances to fold in. Code view (via DeepArrow GADT), explanation. * From code view, extract executable code //and// formatted ({{{lhs2TeX}}}). * Make a binary distribution. But first, make Eros easy to use by others (discoverable functionality & UI). * Read up on iData & iTask (in Clean, by Rinus P et al). Probably the work closest to TV. Uses generic programming for type-driven UIs, including user-defined record points. (Solves a problem of TV/Eros.) I think iTask is an application of iData to workflow. Re-read SAPL paper (fast interpreter). * Revisit polymorphism question, relating explicit polymorphism with visualization as parameter. * How would * Jim McKinna asked how to present "tagged values", i.e., sum types. He suggested horizontally juxtaposed sub-guis with all but one greyed out. I suggested instead to have a label plus the corresponding typed value (constructor argument), //and// dynamically resize the GUI when the label changes. Try with @Either@ === * I was unsatisfied with last night's dinner conversation about religion. I realized suddenly that I'd rather be in bed, so I paid & left abruptly. 
If I weren't so tired & irritated, I might have been better able to (a) love what is (as a starting point) and (b) ask for something to help. Here's a proposal: play a game. The goal is to optimize everyone's internal state, say heart-openness, enjoyment, curiosity. In the game, everyone provides honest real-time feedback on their actual state and sometimes makes requests for what would help them shift their state. Others can help people to clarify their needs & requests. Keep it collaborative, with a group goal. Play with this idea, formulating some games and trying them out. Think of my personal goal as having fun, and stay unattached to strategies and foci for fun. For instance, intimacy is fun, but so is cleverness & humor. * Some Scheme links: [[Planet Scheme| http://planet.plt-scheme.org]], [[Untyped blog| http://www.untyped.com/untyping]]. * I often write @(g.).(.h)@ (point-free) or @\ f -> g.f.h@ (pointed) in my code. Usually, @g@ and @h@ are inverses, so the composition looks like three sides of a commutation diagram. Example: temporal or spatial transformation of functions. * Idea: avoid actual tupling/untupling in arrow computations by doing for arrows what Manuel Ch did for parallel arrays. Apply to time-changing values. Success means that multiple inputs are seen to update independently. * Cliff Beshers gave a talk on Linspire experience. He's thinking along the same lines as I am about typed wrappers around files. "Write expressions on files." See if he wants to go anywhere with these ideas. (He's left Linspire.) "Support files as typed values (TangibleValues?)". * Third day of ICFP. At last night's banquet, Paul H & I received the award for the most influential paper of ICFP 1997. Very gratifying. * During lunch, I remembered an old idea for eliminating all explicit use of IO, and I got excited about it again. ++++ * Interpret IO as data marshalling. * Users are pure functions, with user interaction being functions in mutual recursion. 
* What drives evaluation in a lazy language? Marshalling. * The old (Haskell 1.2) style of IO programming is close to this style, but it didn't work out well. Explain why, and what's different in my new approach. * Could old stream style IO work if the values exchanged are FRPish values? * Unified view of in-memory computation, inter-language calling (FFI), distributed programming, and user interaction. * Maybe deal with lossy marshalling, i.e., incomplete transmission of a value during marshalling. Or maybe require that marshalling is lossless, in which case some lossy conversion happens "before marshalling". Maybe depends on whether/when the choice of marshaller is automatic. Explore these questions via examples. * Use the TV trick of automatically removing matching marshall/unmarshall (output/input) on composition. Oh -- maybe start with TVs rather than with pure functions. Not sure. * "Eliminate unnecessary conversions" (a quote from Duncan Coutts's "Stream fusion" talk). * Examples: games, UIs, backup. * Gather examples of marshalling mechanisms: FFI, GpH (or whatever), tty, UI, CGI. * Paper title: "IO is marshalling", "Purely functional users", "Users are functions". === * Start thinking of programs in terms of lots of parallel activity with emergent behavior. Read up on amorphous computing. * Stream fusion talk: Johan J suggested tree-like types, and Duncan responded that the "next" (stepper) functions are trickier. * Revisit my choice of DeepArrow combinators. Are some there just to avoid polymorphism? * Still in Freiburg. CUFP is today. * Xavier L mentioned that he hates GUI programming and that people are content to use existing non-functional languages for GUIs (C++, Java). I was surprised to hear that, and I'm guessing behind his comment is the experience and assumption that GUIs are programmed in a very imperative style. 
Perhaps now is a good time to make a new version of Fruit, based on arrows & FRP, especially if I could use it to help (re)implement Eros. * Chatted with Mike Sperber. He knows the history of Fran implementations, and he did an interestingly different one himself, for drama lighting. His architecture is very different from what I've considered. He did polling with blocking and multi-threading. Polling a discretely changing value means a thread blocks until the next change. Use {{{select}}} to wait for the first of two values to change. I want to pursue that approach. Lots & lots of lightweight threads. Maybe this is the classic comparison between events & threads. Time-transformation is important to Mike, and now I remember that it's important to me. Make sure my new @Source@ stuff can support it. * Lunch with Leaf Peterson and Anwar Ghuloum from Intel. (I met Leaf in Seattle.) We talked about the future of CPUs vs GPUs, data parallelism, etc. They have some projects (particularly graphics) that might be a good fit for me, if funding comes through. Leaf worked on the TIL compiler and is interested in exploring my data-driven ideas applied to modules. (He didn't on the spot buy my claim that imports are global variables. Alter my description.) In particular, he took the description to mean updating of a module through its import. Get in touch with them when I'm home, to schedule an Intel visit (Santa Clara). Leaf lives in Seattle and is in Santa Clara on Thursdays. * Consider sum types for TVs. ++++ \begin{code} oEither :: TV a -> TV b -> TV (Either a b) \end{code} === A general pattern for contravariant functors. Compare with @either :: (a -> c) -> (b -> c) -> (Either a b -> c)@. Where does "either" belong, algebraically? * Look up "An event detection algebra for reactive systems", 2004. * Thoughts on "IO is marshalling": ++++ * How to start working on it? Example applications. * Is a @TV (a -> b)@ a marshaller for @a -> b@? 
Is it //also// a marshaller for a user function of type @b -> a@? Is there research on algebras of marshallers? Somehow, it gives the user access to the function, but doesn't it also give the function access to the user? Hm -- the user has a much richer type than @b -> a@. What //is// a user's type? Call it @UserIn -> UserOut@. * If user & function are in mutual recursion, what's the result? I guess it's the conclusion or an ongoing byproduct of interaction. For instance, the result of "save", which may be a time-varying value, since one can save many times. * Consider a simple editor application. Define data types for input & output, separated from any human UI. Define a UI (scrolling, redisplay, etc, text and command input) and a semantic result (e.g., time-varying map from names to strings). * === * Revisit arrow-based Phooey (or Fruit), with more interesting inputs. * At [[Hac| http://haskell.org/haskellwiki/Hac_2007_II]]. * Thorkil Naur, Peter Naur's son, is there. Look at P.N's home page: www.naur.com. See in particular "An anatomy of the human life". * [[Slightly Larger Harpy Tutorial| http://uebb.cs.tu-berlin.de/harpy/download/larger-tutorial.pdf]] * [[Some MGS publications| http://mgs.lami.univ-evry.fr/PUBLICATIONS/publication.html]] * To do while here and no other collaborative stuff is going on: ++++ * Record and push my latest package changes, up through Eros. * Get Eros talk and notes on the web and linked from my home page. * Begin user notes for Eros. Maybe put on the Haskell wiki. * Mock up a TW for browsing source code. Use DList as a test case. === * I'm working with [[Russell O'Connor| http://r6.ca]] on an GuiTV interface for applying the [[Kelley Criterion| http://r6.ca/blog/20070820T175938Z.html]] to optimize investments. To do: ++++ * Regress my libraries so they run on ghc 6.6. Done. * Make an input for a drop-down list. ++++ \begin{code} choices :: [String] -> Input UI String \end{code} === Done. 
* Get his "invest" code installed on my machine. Done. === * [[Haskell Workshop videos| http://web.mac.com/malcolm.wallace/HaskellWorkshop.html]] posted. * Russell suggested using a zipper/derivative structure for the @choice@ function, since zippers & derivatives describe a value in a context. Interesting to imagine when an input is about navigating in a domain and when it's just about selecting. * Investment GUI works. :) Next make an input that polls (@IO a@) at regular intervals. Learn to use wxHaskell timers. Done. * [[Haskell for pluggable apps, with the GHC API| http://austin.youareinferior.net/?q=node/29]] * I've had to recompile wxHaskell each time I switch between ghc versions. Instead, change my {{{configure}}} options: {{{./configure --prefix=/usr/local/ghc-6.8.0.20071002 --with-opengl}}} . * I switched to ghc-6.6 so I could make sure Phooey etc work for 6.6. When switching back to 6.8, I get a strange message from ghci, something like "not built for interactive use". I installed the latest mingw snapshot, ghc-6.8.0.20071002. * I'm back home from Germany and so glad to be here. We got a good dose of rain last night. The air is clear, and the creek is running. Bill & crew finished the deck while I was away, and it looks terrific. * Called Campora Propane (209.736.0449) about our tank, which is now at about 38%. We have a 150 gallon tank, and they'll come out any time it's ready to take at least 75 gallons. I think they fill only to 85%, so I'll wait until it reads below 35%. * Here's an idea that came from a conversation with Cliff Beshers and David Fox in Freiburg. ++++ While building Linux packages, sometimes a missing file is discovered late in the process. They'd like to detect such failures early on. The idea is to split each build chunk into one step that finds needed resources and another step that uses the resources. When composing build chunks, combine the first phases, and combine the second phases. 
++++ \begin{code} -- Build step, building on a monad m data BuildT m a = forall r. BuildT (m r) (r -> m a) instance Monad m => Monad (BuildT m) where return a = BuildT (return ()) (\ () -> return a) BuildT prep bld >>= f = BuildT ( ... ) \end{code} === Oh! This idea doesn't seem to work with the @Monad@ interface, which allows the preparation step of the second chunk to depend on the result computed by the first step. Try having the monadic return value come from the prep step instead. ++++ \begin{code} data BuildT m a = BuildT (m a) (a -> m ()) instance Monad m => Monad (BuildT m) where return a = BuildT (return a) (const (return ())) BuildT prep snka >>= f = BuildT (do a <- prep BuildT mb snkb <- f a ...) ... \end{code} === Doesn't work out either, as it makes the second @BuildT@ too early. An applicative functor or arrow interface would give the necessary separability. First AF: ++++ \begin{code} data BuildF m a = forall r. BuildF (m r) (r -> m a) instance Functor m => Functor (BuildF m) where fmap f (BuildF prepX bldX) = BuildF prepX (fmap f . bldX) instance Applicative m => Applicative (BuildF m) where pure a = BuildF (pure ()) (\ () -> pure a) BuildF prepF bldF <*> BuildF prepX bldX = BuildF (liftA2 (,) prepF prepX) (\ (rf,rx) -> bldF rf <*> bldX rx) \end{code} === Then arrow: ++++ \begin{code} data BuildA (~>) a b = forall r. BuildA (() ~> r) (r -> (a ~> b)) instance Arrow (~>) => Arrow (BuildA (~>)) where arr f = BuildA (arr id) (const (arr f)) BuildA prep bld >>> BuildA prep' bld' = BuildA (prep &&& prep') (\ (r,r') -> bld r >>> bld' r') first (BuildA prep bld) = BuildA prep (first . bld) \end{code} === For instance, @(~>) = Kleisli IO@, where ++++ \begin{code} newtype Kleisli m a b = Kleisli { runKleisli :: a -> m b } \end{code} === === * Article: [[It's Time to Stop Calling Circuits "Hardware"| http://www.cs.ucr.edu/~vahid/pubs/comp07_circuits.pdf]]. * At ICFP, Mike Sperber told me a bit about his Lula implementation of FRP. 
He used threads and blocking on events, which I'd never considered. I think he said he used {{{select}}} for event disjunction. * Thoughts on representing behaviors/sources efficiently: ++++ Represent a behavior as a current value and an event. Keep current values alive in closures instead of a ref or recomputing. Switch to one-shot events and maybe blocking. For @(<*>)@, use @eitherE :: E a -> E b -> E (a + b)@, or @fmap@ and @mappend@. * First, reactive discrete behaviors. Represented as an initial value and a way to get more: ++++ \begin{code} data RB a = RB a (E (RB a)) instance Functor RB where fmap f = (pure f <*>) -- fmap f (RB a e) = RB (f a) ((fmap.fmap) f e) instance Applicative RB where pure a = RB a mempty bf@(RB f ef) <*> bx@(RB x ex) = RB (f x) (((<*> bx) <$> ef) mappend ((bf <*>) <$> ex)) \end{code} === Note that this formulation gives automatic value caching without refs. Current values ride along in the event handlers. * Next, simple continuous behaviors. Single out constant functions, for optimization. ++++ \begin{code} type Time = Double data CB a = K a | TFun (Time -> a) -- | As a time function asTFun :: CB a -> (Time -> a) asTFun (K a) = const a asTFun (TFun f) = f instance Functor CB where fmap f = (pure f <*>) instance Applicative CB where pure = K K f <*> K x = K (f x) sf <*> sx = TFun (tf <*> tx) where tf = asTFun sf tx = asTFun sx \end{code} === * Then reactive, continuous behaviors, simply by composing: ++++ \begin{code} type CRB = RB O CB \end{code} === * Think of @RB@ and @CRB@ as a "reactive normal form" ("RNF") for behaviors. Define semantics for behaviors; prove properties related to RNF; and then introduce the representations above, justified by the properties. * I could use one thread per output or one for all outputs, via @liftA2 (>>)@. The former has more concurrency, while the latter might foster sharing of computation. * On sharing, consider @g <$> (f <$> e)@ and @h <$> (f <$> e)@. * I could have a thread per @mappend@ also. 
Maybe one per @(<*>)@. May help with sharing. * Use @dup@ to track re-use. Maybe one thread per @dup@. @Applicative@ and @Arrow@ allow @dup@ detection. For @Arrow@, drop @arr@. * Try out various representations, measure performance on examples, and write up the results. * Go back to one-shot events as primitive. An occurrence can yield more (unfold-style). "Subscribe" to one occurrence at a time. Replace "subscribe" with "block". * Play with imperative, multi-threaded blocking style. Guess: that style allows //efficient// pull-based programming. Needs something like {{{select}}} for @mappend@. What exactly? Oh -- software transactional memory (STM). * For simple continuous (time-based) segments (@CB@ above), use another thread. Hide in an output setter. Implementation involves thread start/stop. === * For a bit of background on pre-monadic functional IO, see [[Imperative functional programming| http://citeseer.ist.psu.edu/peytonjones93imperative.html]] and its references, especially Section 3.1. * Musing over reactivity. Principle: an agent can only react to //internal// state, not external state. In other words, an agent reacts to its senses and what it extracts from its senses. When an agent is transformed in space or time, its sensory inputs are inversely transformed. * Tried [[MetaPlug| http://hackage.haskell.org/cgi-bin/hackage-scripts/package/metaplug]], a wrapper around {{{ghc-api}}}. Doesn't seem to be up to date with ghc 6.8 * Starting to poke around in {{{ghc-api}}} and other bits of {{{ghc}}}. Pretty complicated, so it will take time to get comfortable. I'll start preparing to use it by generating a data representation of @DeepArrow@, from which it will be trivial to generate code. * I've resurrected my @DeepArrow@ data representation, for persistence & compilation. I don't know quite how I want to apply it, however. 
Specifically, when I convert the data type in to a Haskell expression, do I want the expression to denote a pure value, or a whole TV, or what? For compilation, the value would suffice, while for persistence, I'd want the whole TV. One tricky bit: how to externalize an output, capturing current input states? * From Don Stewart, in response to a note I sent about separating IO & logic. +++> Thank you Conal, for reminding people about this :) Examples involving more composable IO stuff would be great, if you get the time, and would help spread the word . Cheers, Don === * Response to Joe Mitchell: +++> > I'm wondering if you would be willing to share more about how you use the OFNR steps. Sounds like your thinking on OFNR is just as a way of lining up the possible elements of a communication. And then once these elements are lined up, and the person has their heart open, it doesn't make much difference what they say -- their heart will lead the way. Am I hearing you correctly? Hi Joe. I got a smile seeing your name & note. "And then once these elements are lined up, and the person has their heart open, it doesn't make much difference what they say -- their heart will lead the way." I like how you put it, though I'd drop "once these elements are lined up", since the elements don't matter to me, only the openness. I like to use the elements where they're measurably helpful and replace them when they're not. I have stopped teaching "the OFNR steps", as I believe that any formula or procedure tends to undermine the central intentions of living connection with self & others. Instead, I see O, F, N, & R as four "tools" (among many) that can be helpful in supporting clarity & connection. I've also come to see NVC as a mainly *inner* practice, not as a communication method. For instance, the "observation" tool helps one get re-grounded in reality, dissipating confusions between the known and the imagined. 
That clarity is crucial in determining whether to direct requests inwardly or outwardly. Given that the observed event is in the past, and I like to live & connect in the present, I don't see value in recounting my observation to another person. Similarly, "feeling" awareness often helps me to get to my needs, so it's a very helpful internal tool. If I'm going for Connection, then I might ask a person to listen to my feelings & tell me what essence they're getting and what feelings come up for them. On the other hand, if I want to go for something other than connection, then stating my feelings (especially with more than one or two words) would compete for attention with the needs & request I'm going for. I especially like your phrasing "their heart will lead the way", as it captures my newer understandings clearly and poetically. Language skill can obscure what's made plain by the old plainly-jackal language, namely that one's consciousness is still mired in judgment. (Some examples at http://evolve.awakeningcompassion.com.) That's important information! I'd much rather people speak un-self-consciously, so that language clearly reveals consciousness. Shift the consciousness, and the language will then shift accordingly. I'd love to hear your reactions. Hugs, - Conal === * Progress in zipper-based bi-infinite image trees. Very elegant so far. A "space zipper" contains information (e.g., image samples) collected densely in a continuous space. A space zipper is infinite and continuous, like Pan's function-based representation, but does lots of caching, unlike a function. * More on space zippers: ++++ * At each node, compute an infinite sequence of refined approximations of a single sample approximating the whole interval. The zeroth approximation is from a sampling. The first approximation is the average of the zeroth approximations for the two (or four) sub-intervals, etc. * Have also an infinite sequence of coarsening approximations. 
The first (or //-1th//) is taken from the parent's zeroth approximation, the second from the parent's first coarsening approximation, etc. * For efficient blitting, I think I'll want to make and cache arrays of samples. Could be one array per interval or a sequence of arrays, having one element, two, four, .... If a sequence, then make the //i+1//th array by concatenating the //i//th arrays for the sub-intervals. What about bidirectional sequences here also? The //-(i+1)//th element has //1/2^(i+1)// samples and comes from halving its parent's //-i//th element. Taken literally, this is just silly talk, but I bet it hints at something lovely, probably from a relational point of view. * Hmm: lazy relational programming. Have two values (for instance) and a way to compute each from the other as well as each on its own. Assume that computing either from the other is cheaper than without the other (but not free). When a value is demanded and not yet computed, see if the other is computed. If so, compute from the other. If not, compute on its own. Generalize the idea past pairs of related values. Somehow guarantee that the two means of computation yield the same result, so that the semantics is independent of evaluation order. * When it's time to blit some pixels, I could either grab some number of fixed-size arrays, or piece together a variety of sizes. The latter approach would preserve more laziness. I'm assuming each array is strict, i.e., elements fully evaluated (and unboxed). === * On Levenshtein (edit) distance: ++++ {{{ <chessguy> anybody know of a haskell implementation of the levenshtein distance algorithm? <ddarius> chessguy: lambdabot <conal> @go levenshtein distance <lambdabot> http://en.wikipedia.org/wiki/Levenshtein_distance <lambdabot> Title: Levenshtein distance - Wikipedia, the free encyclopedia <chessguy> conal, that doesn't have a haskell implementation [09:41] <ddarius> chessguy: lambdabot uses the Levenshtein distance to "fix" "mistyped" commands. 
<chessguy> ddarius, yeah, but i bet that's going to be a bear to find <ddarius> lambdabot isn't that big [09:42] <conal> chessguy: the "@go" was for me. i didn't know what "Levenshtein distance" is. <twanvl> See: http://www.cse.unsw.edu.au/~dons/lambdabot/Lib/Util.hs <chessguy> aha <conal> I wonder why darcs doesn't use Levenshtein distance. <chessguy> liyang, nah, there are lots of imperative implementations, most of which could easily be directly translated from that wikipedia page <ivan> it's N^something? [09:45] <dankna> because all it gives you is a distance and not the patch to achieve it }}} === * [[View patterns: lightweight views for Haskell| http://hackage.haskell.org/trac/ghc/wiki/ViewPatterns]]. Apparently just checked into GHC head. * To do: add {{{-fno-warn-orphans}}} to some of my source files, to supress orphan instance warnings. * Noticed in {{{Data.Tree}}}: ++++ \begin{code} instance Applicative Tree where pure x = Node x [] Node f tfs <*> tx@(Node x txs) = Node (f x) (map (f <$>) txs ++ map (<*> tx) tfs)
\end{code}
=== Compare with this code from [[2007-10-10]]: ++++
\begin{code}
instance Applicative RB where
pure a = RB a mempty
bf@(RB f ef) <*> bx@(RB x ex) =
RB (f x) (((<*> bx) <$> ef) `mappend` ((bf <*>) <$> ex))
\end{code}
===
{{{
<fasta> conal: I think I would use STArray and then freeze it.				      [15:28]
<fasta> conal: if there are no incremental updates, Array is fine too.
<conal> fasta: thanks.  and can you tell me how STArray & UArray relate?
<fasta> conal: STArray relates to Array
<fasta> conal: STUArray relates to UArray
<conal> and then there's GHC.PArr							      [15:29]
<mrd> \@hoogle runSTUArray
<lambdabot> Data.Array.ST.runSTUArray :: Ix i => ST s (STUArray s i e) -> UArray i e
<conal> fasta: and the U is "unboxed"?
<fasta> conal: I wasn't aware of that one, but that's for the nested data para. probably?
<fasta> conal: yes
<mrd> > runSTArray (do a <- newArray (1,10) 1; return a)
<lambdabot>   Not in scope: newArray'
<conal> fasta: thanks.  and yes. PArr is for ndp.
<fasta> conal: is GHC.Arr documented already?						      [15:30]
<mrd> > runSTArray (do a <- Data.Array.MArray.newArray (1,10) 1; return a)
<lambdabot>   Not in scope: Data.Array.MArray.newArray'
<conal> fasta: i found a bit on the wiki (i forget whether the haskell wiki or ghc wiki)
<conal> i like the look of runSTArray & runSTUArray.					      [15:32]
<fasta> conal: They are just wrappers around runST, I think				      [15:33]
> yes
<mrd> and unsafeFreeze
<mrd> but they do let you omit a type annotation here or there
<mrd> \@wiki SPOJ									      [15:36]
<mrd> I have a little intro to mutable Arrays in there
}}}
===
* Stumbled across Intel research blog, including some [[by Anwar Ghuloum| http://blogs.intel.com/research/authors#anwar_ghuloum_]] on data parallelism etc.
* [[Lazy SmallCheck| http://www-users.cs.york.ac.uk/~mfn/lazysmallcheck]]: a very clever idea.  Uses monotonicity to verify properties over infinite input subspaces by applying to //partial// (error-ful) values.
* Space zipper stuff going well.  I now have elegant definitions of intervals, infinite binary trees, infinite streams, infinite bi-streams, 1D & 2D space zippers, 1D bi-infinite dense sampling, and same with infinite approximation bi-streams.  Next: replace samples with streams of sample arrays (1 element, 2, 4, ...).  Do I want an approximation bi-stream of streams of arrays, or a stream of approximation bi-streams of arrays?  I want to have a variety of sizes of arrays, so I can efficiently piece them together when rendering.
* I want to write QuickCheck properties (automated tests) for SpaceZip.  How do I do it, considering that my data structures are all infinite?  The same problem arises for functions, and is addressed with a @forAll@ property.  Can I map my types as functions from some kind of index type?  Probably yes.  For instance, @asFun :: BinTree a -> [Dir] -> a@.  Generalize to a type class, and hide the @forAll@ in @(==*)@.  Here are some properties: ++++
\begin{code}
-- BinTree
asFun (fmap f bt) ==* fmap f (asFun bt)

-- BiStream
asFun (fmap f bt) ==* fmap f (asFun bt)
forward . back ==* id
back . forward ==* id

-- Loc (SpaceZip)
(zoomIn (ctxDir (ctx l)) . zoomOut) l ==* l
forAll $ \ dir -> zoomOut . zoomIn dir ==* id
forAll $ \ dir -> pan (otherDir dir) . pan dir ==* id
\end{code}
===
* Got a project {{{http://code.haskell.org/Eros}}}, with files at {{{conal@code.haskell.org:/srv/code/Eros}}}.  To do: send in requests for other projects.  But oops!  I can't connect with SecureCRT.  My key gets rejected.  Perhaps as {{{ndm}}}, since he connects via a Windows machine.  First, make sure my {{{authorized_keys}}} on {{{conal.net}}} and {{{code.haskell.org}}} are identical, as well as SecureCRT settings.
* Paper title: "Space-time zippers".  Remember, though, that I want to include many more dimensions, beyond space & time.
* Responded to Joe Mitchell & Anjali Corinne, asking how I teach NVC.
* Installing Python-2.4.4 on Joseph's new server, since moinmoin and ziddlywiki both rely on 2.4 rather than 2.5.  {{{./configure}}}d with {{{--prefix=$HOME/python-2.4}}}. I'll have to tweak the moinmoin and zope start-up shell scripts accordingly. Starting up zope, I get {{{Undefined symbol: PyUnicodeUCS4_AsEncodedString}}}. The old server is running 2.4.3, so I switched to that one. Same message. Maybe there's another change I have to make also. Instead, work on migrating the data. * For SpaceZip, I'm using "model-based" testing, as in [[bytestring| http://code.haskell.org/bytestring/]]. The idea is to specify operations in terms of equivalence to the operations on a simpler "model" type. For bytestring, a model is lists. For SpaceZip, I'm using functions as models. For instance, model a bi-directional stream as @Int -> a@ and an infinite binary tree as @[Dir] -> a@. I started modeling a space zipper as @[Step] -> a@, where @data Step = Up | Down Dir@. Then @zoomIn dir@ and @zoomOut@ come out //very// simply, but I don't know how to do @pan@. I like this problem, because it tells me that I don't have a clear semantic basis for what I'm doing. Perhaps unsurprisingly, I'm also unclear about how I'll use the space zipper. Misc puzzle pieces: ++++ * At one level, I'm representing a function over space: @f :: R -> a@. * For rendering into discrete, retangular bitmaps, I want to extract finite, approximations of @f@ over intervals. The approximations are piecewise-constant with regularly spaced constant sub-intervals. They could easily be piecewise-linear instead. * In addition to a moving view interval, I also want variable zoom factor. In other words, the width of a facet varies. * To allow progressive refinement, compute not just one value per facet, but an bi-infinite stream of improving values. * Some implementation details: ++++ * For efficient access, package up sequences of approximation streams into approximation bi-streams of arrays. 
* For laziness, define these arrays as concatenations of shorter (half-size) arrays, in a binary tree. Given an actual display interval and resolution, construct the display array from as few/big pieces as possible. * For re-use, compute and store only power-of-two resolutions. Dynamically resample during discrete rendering. * During progressive refinement at a given resolution, higher resolutions are being computed, as well as contributions to improved versions of lower resolutions. Work for every resolution contributes to every resolution. === * What's the specification, separated from the efficient representation? Start with generating an approximation bistream for single facet. How to specify the facet? Try simply as an interval. Sample the function once in that interval for the zeroth approximation. Combine with approximations from the two sub-intervals for the refined approximations. What about the coarsening approximations? There isn't enough information to compute them, since we don't know whether a given interval is the lesser or greater half of its parent interval. I could pass in a stream of either directions or approximations. Or as an initial simplification, drop the coarsening approximations. * Next, extend to arrays of continguous facets. Approximation stream of arrays. Arguments: lower bound, facet width, and number of facets. Easy to implement/specify via the single-facet interface. * What allows a data-rich representation of these interfaces with maximal re-use? Restrict some of the arguments. ++++ * Facet size (inverse of sampling resolution) must be a power of two (often a negative power). Approximate other sizes by dynamic resampling. * Facet lower bound must be a multiple of the facet size (pixel). * Facet array sizes must be power of two and must start at locations friendly to that size. (Say more.) Dynamically assemble all others from the standard ones. What's a friendly start location? It depends on how the zippers are laid out. 
Currently I'm using a funny alternating layout. Going upward from the initial focus, the intervals are (0,1), (-1,1), (-1,3), (-5,3), (-5,11), (-21,11), .... (Simpler would be (0,1), (0,2), (0,4), ..., but the negatives wouldn't be covered. I could have two data structures instead, handling positive and negative numbers separately.) === * Track facet size in the @Loc@ (zipper) type, as a third element, along with the @BinTree@ and @Ctx@. The @pan@ function keeps the facet size unchanged, while @zoomIn@ halves and @zoomOut@ doubles. Oh -- track the whole interval. * Here's how to make an array of sample values for a given (power of two) facet size, lower bound, and number of facets. Start at some location in the zipper. Zoom out as necessary to an interval that contains the desired interval. From there recursively process. If a given interval is contained in the target interval, add it. If disjoint, discard. If overlap, recursively subdivide. Very parallelizable. Lesser and greater halves may be optimized separately. The lower bound of the lesser half is at least the lower half of the target interval, and similarly for the upper bound of the greater half. * Each node represents an interval and contains a stream of approximation bi-streams of arrays. In the outer stream, the //i//th element is a bi-stream of //2^i// element arrays. The //0//th element is just singleton arrays containing the sample bi-stream. The //(i+1)//th element comes from concatenating corresponding //i//th elements from the two sub-intervals. * Look up Chris Okasaki's article on type-enforced square arrays. === * Read blog post [[Simple Haskell Web Programming with HAppS| http://bluebones.net/2007/09/simple-haskell-web-programming-with-happs]] * There still doesn't seem to be an TiddlyWiki that downloads tiddlers on demand. Start noodling over an elegant Haskell-based TW-like thing. * In {{{inf-haskell.el}}}, sometimes {{{inferior-haskell-cabal-buffer}}} is bound to a killed buffer. 
The symptom is "selecting deleted buffer" when I load a module, and the work-around is {{{(setq inferior-haskell-cabal-buffer nil)}}}. * To explore also: ++++ * IA refinement streams * Uncurried space zippers (quadtrees, etc) === * Made http://emergence.awakeningcompassion.com point to a static html file. Copied the "related documents" into [[other/| http://emergence.awakeningcompassion.com/other]], and made new pdfs there. * [[Damn Cool Algorithms, Part 1: BK-Trees| http://blog.notdot.net/archives/30-Damn-Cool-Algorithms,-Part-1-BK-Trees.html]]. Exploits the triangle inequality (for metric spaces) to give a data structure for fast searches of near-misses, e.g., spelling errors. * Considering how to compose approximation streams (ASs). Suppose I have a sample value //x1// from an interval's midpoint and two ASs, //l1,... and g1,...// for the interval's halves. I'd like to make an AS for the interval, in which the //i//^^th^^ element is the average of //i// samples. Try this for the values to be blended: //[x1], [x1,l1], [x1,l1,r1], [x1,l2,r1], [x1,l2,r2], [x1,l3,r2], [x1,l3,r3], ...//. Weight according to the subscripts, e.g., //(1*x1+3*l3+2*r2)/6//. Simpler: produce the sums rather than the averages. Then //x1+l3+r2//, and do the division as the results are read out. * Still simpler would be not even add, but rather produce a stream of samples. Then @x1 <:> (ls interleave gs)@. We may lose some sharing of sum computations. * Added @scanl@ and @interleave@ to @Data.Stream@, and emailed a darcs patch to Wouter S. * Removing some processes from my startup ({{{msconfig}}}): PDVDServ, NeroCheck, ifrmewrk, EQUWiz, DMEDIA, BatteryLife, PowerForPhone, ACMON, sm56hlpr, * Getting started with OpenGL. ++++ * I don't seem to have the Haskell GLUT package, and I was unable to install it. * The wxHaskell contrib GLCanvas sample compiles & works. * Looked around for CSG and found [[GTS| http://gts.sourceforge.net]] ("GNU Triangulated Surface Library"). 
In compiling it, I need [[pkgconfig| http://www.freedesktop.org/software/pkgconfig]] and GLIB. Installing pkgconfig seems to have brought along GLIB. Compiling GLIB fails. I manually tweaked the config .h files +++ {{{ mv glibconfig.h glibconfig.h-orig cp glibconfig.h.win32.in glibconfig.h }}} === and continued. Got further. Oh! I think my MinGW is interfering. I could try a [[MinGW version of pkgconfig| http://www.mingw.org/MinGWiki/index.php/pkg-config]], but it looks hairy. Instead, shuffle my PATH so that cygwin comes before MinGW. I got different problems with glib. Apparently it doesn't work well with cygwin either. Maybe [[GTK+ (not GIMP) for Windows| http://www.gimp.org/~tml/gimp/win32/downloads.html]] will help. Oh -- grab pkg-config and glib from [[Cygwin| http://cygwin.com]]. I'm always nervous about cygwin updates, worrying that something will break, completely unrelated to what I'm downloading. Got pkg-config and gli, allowing GTS config to get a bit further. Got glib-dev. Now "./configure" goes through fine. Also "make". "make check" has a some errors. * [[Haskell OpenGL tutorial and pointers| http://blog.mikael.johanssons.org/archive/2006/09/opengl-programming-in-haskell-a-tutorial-part-2]]. * [[K-3D| http://www.k-3d.org]] is a modeler that uses GTS. * [[OpenGL Programming Guide or 'The Red Book'| http://fly.cc.fer.hr/~unreal/theredbook]]. Better formatting [[here| http://cs-sdl.sourceforge.net/index.php/Red_Book_Table_of_Contents]]. More (including the "blue book" reference manual and SDL info) [[here| http://cs-sdl.sourceforge.net/index.php/Main_Page]]. === * Gem from {{{#haskell}}}: ++++ {{{ <EvilTerran> > uncurry$ (***) on ((/) on fromIntegral)
<lambdabot>  <(Integer,Integer) -> (Integer,Integer) -> (Double,Double)>
}}}
===
* Cliff Beshers's [[n-Vision work| http://www1.cs.columbia.edu/graphics/projects/AutoVisual/AutoVisual.html]].
* SpaceZip: ++++
*  What sampling interface do I want for SpaceZip?  Ask for a stream of arrays of samples of a given facet (pixel) size, lower bound, and number of facets.  Require that the pixel size is a power of two (often negative) and the lower bound is a multiple of the facet size.  No restriction on the number of facets.  Hm.  I don't see how to implement that interface, considering my present alternating initial ascending pattern of intervals.
*  How about this interface for a data structure: zoomIn, zoomOut, prev, next, value.  It's like a bistream (prev, next, value), but with zooming in & out to double or halve the value resolution.  Add a constant "init", which could be a space zipper (new name please) containing a description of the intervals themselves or maybe just their midpoints.  It's also a functor.  And maybe another map-like interface that gives access to information from above (zooming out) or below (zooming in).  Also a "seek" that takes a resolution and lower bound and moves there (efficiently).  And maybe one that extracts an array or (other efficient sequence type) of values, starting with the current value and moving right (next).
*  I thought it would be awkward to have one SpaceZip data structure for the positive numbers and another for the negatives, but now I think that this pair will be a sort of zipper.  Once I allow the two structures, I can have a very simple, non-alternating pattern of interval expansion.
*  Here's a very straightforward representation: a @data@ structure with fields for value, prev, next, in, out.  Tie them all together carefully as in "Weaving a Web".  Call it "SpaceWeb" or "DenseWeb" or just "Dense".
===
* [[Lazy Functional Quaternions| http://www.citeulike.org/user/conal/article/1837334]] by Bryan O'Sullivan (bos on #haskell).
* I'll be giving a tech talk at Google next week.  Bryan O'Sullivan (bos) helped with the initial connection, and it worked out very quickly from there.
* Responded to a recent note from Harry Pearle about his education book etc.  He still plans to use one of my Pan pictures on the cover (my four kids tiled to infinity and contracted to a disk), and maybe more as well.  I pointed him to Daniel Quinn on education.
* Took Holly to the Sacramento airport.  She's going to Seattle for a week for [[various vocal gigs| http://hollyc.com/hollosphere/?p=25]], performing & recording with Michael Stillwater.
* Thinking more about spatial data representations.  One choice is zipper vs [[web| http://www.citeulike.org/user/conal/article/1844316]].  I realized I wasn't clear how to build even a simple 1D web, i.e., bi-infinite doubly linked list.  Came up with this start: ++++
\begin{code}
data BiStream a = BiStream { prev :: BiStream a, value :: a, next :: BiStream a }

numbers :: forall a. Num a => BiStream a
numbers = nums'
where
nums' = BiStream (back nums') 0 (forw nums')
-- continue to elements before or after a given one
back, forw :: BiStream a -> BiStream a
back n' = s' where s' = BiStream (back s') (value n' - 1) n'
forw p' = s' where s' = BiStream p' (value p' + 1) (forw s')
\end{code}
\begin{code}
instance Functor BiStream where
fmap (f :: a -> b) (BiStream wp wv wn) = wbs'
where
wbs' = BiStream (back wbs' wp) (f wv) (forw wbs' wn)
-- continue to elements before or after a given one
back, forw :: BiStream b -> BiStream a -> BiStream b
back n' (BiStream p v _) = s' where s' = BiStream (back s' p) (f v) n'
forw p' (BiStream _ v n) = s' where s' = BiStream p' (f v) (forw s' n)
\end{code}
=== Next, generalize @fmap@ so that the given function has access to the previous and next values as well as the current.  Specialize to two versions that ensure well-founded data dependencies.  Redefine @numbers@ in terms of this map-like function.
* [[Marshalling arrays| http://haskell.org/ghc/docs/latest/html/libraries/base/Foreign-Marshal-Array.html#v%3AwithArray]] -- probably useful for bitmap graphics.
* [[Core77 design magazine| http://core77.com]]
Type the text for 'New Tiddler'
* Dense ++++
*  Getting more comfortable with QuickCheck.  Lots of testing and testing tools in place.
*  I re-discovered the problem of regular zoom-out pattern: no negatives.
*  Idea: don't assume that zooming out is well-defined without a specified direction.  Use the initial alternating zoom-out pattern, to cover all of //R//.  Oh, then I can't assume that the lower bound of an interval is a multiple of its width.  That assumption is important because the requests for arrays of pixels will be aligned with the pixel size.
*  Main @Dense@ interface: @fetch@ a stream of arrays of samples (perhaps averaged), given the facet size, lower bound, and count.  Returns the array stream and a repositioned @Dense@.
*  Maybe think of a @Dense@ representation this way: like a @BiStream@ (zipper), with decomposition and coalescing for log-time @seek@.  For instance, to a @next@ would move the current @BinTree@ into the previous context.  ... Oh, hmm.  If I break an tree in two, I have to hang onto the value part in addition to the two halves.
*  Or use a web instead of a zipper.  Give that a try.
===
* Tidbit from dons: ++++>
I often use this in my cabal ghc-options:
{{{ghc-options: -funbox-strict-fields -O2 -fasm -Wall -optl-Wl,-s}}}
the last runs ld's strip automatically.
===
* Replied to Joe M on the NVC-cert list, about "giving" "empathy to a person who is using power over strategies to meet their need for security". +++>
Hi Joe,

Some thoughts:

I'd watch out for any notion at all of "giving" empathy.  The word "giving" suggests that it's for the other person, which is dangerous territory.  In this case, I suspect there's something you're wanting for yourself.  If so, then trying to "give" anything is likely to confuse both of you.  I've sometimes "given" what I've heard aptly called "empathy from Hell", when I try to use the outward *form* of empathic guesses, when there's really something I want for myself.  Blech!

I don't think your *empathy*, or even your empathic guesses, are triggering them, but rather your *expression* of those guesses to them, verbally.  It helps me to think of empathy as purely an internal state of being.  And one that happens or it doesn't.

Could it be that your desire to do the empathy form is a strategy for self-protection?  I think that's been the case for me.  And no wonder the results stank, given the fundamental incongruence between my inner desire and outer behavior.

I still harbor habitual judgments about "power over" strategies.  If I'm thinking in those terms, the other person is probably not going to enjoy being around me.  In those situations, I usually get scared, and by pathologizing the other person, I can take some of the sting out of what I'm hearing.  I also wonder if anything similar is going on for you.

Finally, the (perhaps pseudo-) needs of "security" and "safety" almost never warm my heart.  Lately I've been noting that warm/cold (open/closed) response in me and using it as a signal to keep looking until my heart genuinely opens.  Or, to choose to disconnect from the person and turn my attention elsewhere.  I don't really believe any of us has security/safety, in the sense that we can get physically, emotionally, financially clobbered at any time.  On the other hand, I do warm to the notion "inner peace", which people usually seek by piling up conditional security (bank accounts, insurance policies, personal promises/commitments, etc).  I've noticed in NVC language, people often substitute these conditional strategies for deeper, universal needs.  A clue is the placement of a preposition after a Need word, such as "trust that ..." instead of "Trust" or "integrity of ..." instead of "Integrity".  More on this difference at http://evolve.awakeningcompassion.com/?p=19 and the older posts linked there.

Warmly, - Conal
Hi Joe.  Here's an after-thought:

If your best assessment is that (a) safety/security is up for them, and (b) you saying so will trigger them further into fear/insecurity, then I imagine that the strategy of verbalizing your guess to that person would be counter-productive for both of you.  I think that's what you're saying.   Empathy + silence is probably a better choice.  Or shift from empathy to honesty (carefully -- http://evolve.awakeningcompassion.com/?p=23 ).
=== Joe's message: +++>
Dear NVC people,

I have always found it difficult to give empathy to someone who is in a "power over" strategy.  I'm guessing that the other person's  need is for security and safety.  But it seems like anything that I say that gets even close to identifying their true need as "security " and "safety" only further threatens their sense of security and safety even more and therefore is not heard as empathy but is heard as a suggestion of weakness. It seems to make them cling even more strongly to their power over strategy.

I have one person in my life who seems to use "power over" strategies to get their need for security and safety met.  When I guess that they may be feeling afraid because they want to feel safe and secure, it seems to only make them more adamant about having power.  I can't remember the empathy ever being effective.  I can't remember the empathy ever creating any relief or shift.  I can't remember empathy in this situation ever being welcomed or supportive.

It seems to me that empathy that identifies the need as "security" for a person who is insecure already, is really counterproductive.  The person who is insecure wants to look strong and doesn't want to hear about "security or safety". I am almost ready to give up any attempt at giving empathy to a person who is in a power over strategy.  Am I missing something here?

I am interested in your comments, reactions, thoughts and theories about how to give empathy that is effective to a person who is using "power over strategies" to meet their basic need for security.
===
* Some Internet goodies ++++
*  [[Top 10 Free Video Rippers, Encoders, and Converters| http://feeds.gawker.com/~r/lifehacker/full/~3/177777292/top-10-free-video-rippers-encoders-and-converters-316478.php]]
*  [[Turn Any Action into a Keyboard Shortcut| http://feeds.gawker.com/~r/lifehacker/full/~3/177245342/turn-any-action-into-a-keyboard-shortcut-316589.php]]
*  [[Frameless Rendering: Double Buffering Considered Harmful| http://www.citeulike.org/user/conal/article/1857392]]. Abstract: +++>
The use of double-buffered displays, in which the previous image is displayed until the next image is complete, can impair the interactivity of systems that require tight coupling between the human user and the computer. We are experimenting with an alternate rendering strategy that computes each pixel based on the most recent input (i.e., view and object positions) and immediately updates the pixel on the display. We avoid the image tearing normally associated with single-buffered displays by randomizing the order in which pixels are updated. The resulting image sequences give the impression of moving continuously, with a rough approximation of motion blur, rather than jerking between discrete positions.
We have demonstrated the effectiveness of this frameless rendering method with a simulation that shows conventional double-buffering side-by-side with frameless rendering. Both methods are allowed the same computation budget, but the double-buffered display only updates after all pixels are computed while the frameless rendering display updates pixels as they are computed. The frameless rendering display exhibits fluid motion while the double-buffered display jumps from frame to frame. The randomized sampling inherent in frameless rendering means that we cannot take advantage of image and object coherence properties that are important to current polygon renderers, but for renderers based on tracing independent rays the added cost is small.
=== See also [[this summary| http://www.cs.utah.edu/~shirley/papers/rtrt/node4.html]] and this [[note from Kragen Sitaker| http://lists.canonical.org/pipermail/kragen-fw/2000-April/000201.html]].
===
* This Zen story from [[John Sullivan's home page| http://facstaff.elon.edu/sullivan]] touches me: ++++>
A samurai warrior comes up to a little monk and says: Teach me about heaven and hell.  "Teach you?" the little monk replies, "why you are a dirty, smelly, poor excuse for a samurai.  Even your sword is rusty!" Insulted, the samurai, flush with anger, draws his sword and is about to cleave this insolent monk in two. A split second before he strikes, the monk says:  "That's Hell."  The samurai has a moment of insight.  He realizes that this monk has gone to the very door of death to teach him.  He fills with gratitude, his body relaxes and he sheaths his sword.  At that precise moment, the monk says: "That's Heaven."
=== The rest of his 9/12/2001 talk on Dante's //Inferno// is wonderful also.
* Dense (SpaceZip): ++++
*  Noodling interfaces.  Work from the outside inward, i.e., from the rendering application to the Dense library.  The app is an interactive viewer of infinite, continuous 2D images (think [[Pan| http://conal.net/Pan]] or [[Pajama| http://conal.net/Pajama]], but could be 3D-based as well).  The user pans and zooms arbitrarily and continuously.  After a model or view change, the client has to access another approximation stream of pixel arrays and start grabbing arrays from it.  The client's ideal pixel size won't generally be available, since Dense stores values only for power-of-two-sized intervals.  So the renderer will have to pick an available resolution and then do resampling.  I want the resampling to be both fast and smooth, probably done by 3D hardware.  Most of the time, we'll be grabbing (and hence lazily computing) more pixels than requested, rounding up to a power of two, and then interpolating.  Maybe the interpolation step is where I can also do sub-pixel shift.  Besides the mismatch between requested resolution and available ones, the requested pixel alignments don't match what's stored.  Since I have to deal with alignment mismatch anyway, maybe the odd alternating ascending zipper pattern is no additional problem.  Provide the specs of the ideal sampling and get back something close from the Dense rep, with a transformation to the ideal.  One transformation holds for all elements in the approximation stream.
*  Question for IRC when OpenGL folks are around: ++++
i'm wondering what sort of 2D image support openGL has. or even how to think GL'ily about images.  textures on a quad?  other choices?
also, how best to get fast, good-looking image resampling (rescaling), with sub-pixel motion.
i'm working on a new representation for infinite continuous images etc (functions over an infinite continuous domain).
===
*  Looked into OpenGL for image presentation.  I think there are two choices: (a) texture mapping, and (b) @drawPixels@.  From what I've read, the latter does no interpolation on scaling, so texture mapping with bilinear interpolation is the way to go.  (It has mipmapping and //tri//linear interpolation, but I don't think I'd use mipmap.  My approach is sort of infinite, zipper-based mipmaps -- "zipmapping".  Or maybe mipmapping would be useful with my approach.)
*  3D hardware does other transformations, such as rotation.  Hm.
*  When I write up or talk about Dense, clearly express that it's about approximating the average value of a function over an interval (i.e., integral over size).  That's why I average sampled values.
*  Hm: when the user does a very small change to the view (or model?), do we really want to start over at a coarse approximation?  Doing so would make a visible discontinuity.  Try and see.
*  Hm: "forward" and "back" fit better than "previous" and "next" for bi-streams.  The latter two names sound like element accessors.  Or a counterpart to "tail".
*  How does user input / space exploration relate to data-driven computation?
===
* Switched to ghc-6.8.1.20071102 snapshot for win32.  Still doesn't come with GLUT, but I was able to build GLUT without problem.  Recompiling wxHaskell.
* Compiled & ran an OpenGL program, after installing OpenGL and FreeGLUT from Cygwin.  Compile line: {{{gcc t1.c -lglut32 -lglu32 -lopengl32 -o t1.exe}}}.  (I found these {{{-l}}} options from building the Haskell GLUT package, which isn't pre-installed on the Windows snapshots.)  The compile line works without also the {{{-lglu}}}, which I found described [[here| http://dri.freedesktop.org/wiki/GLU?highlight=%28CategoryGlossary%29]]: ++++>
Graphics Library Utilities (GLU)
OpenGL is good but doing some common operations is a regular pain in the proverbial. GLU is a platform independent library that can build spheres, perform collision detection, determine if a point is inside a 3D shape, etc. GLU works on top of OpenGL.
===
* Problems with GLUT & ghc.  Lots of link-time errors when I ghc-compile.  With ghci, sample programs wedge the session.  Fortunately, the wxHaskell OpenGL demos work just fine.
* The Dense sampling pattern could be any number of sequences.  I could try some randomness.
* [[Chipmunk game dynamics| http://wiki.slembcke.net/main/published/Chipmunk]], which includes links to some nifty dynamics papers, including [[these ones| http://www.beosil.com/publications.html]]
* Thoughts about next week's Eros talks: ++++
*  Usability and composability appear to be at odds.  Perhaps the tension is an illusion.  Usability and composability are both supported by //interfaces// of different styles (IO/GUI vs types).  Look for a unifying notion of interfaces that combines the strengths of each.  Unix pipes is an effective & limited attempt in this direction.  (Discuss.)  Others?
*  Interfaces describe and discipline the composition of "components", which may be software or human (etc), by enabling them to communicate, i.e., exchange information.  For GUIs, the composition is software/human, while for programming libraries, the composition is software/software.
*  Examples: terminal IO, GUIs, typed functions.
*  GUIs show us that interfaces can be user-friendly, while programming languages show us they can compose.  Let's do both!
*  Idea: loose decoupling of content and interface.
*  Give composition to the user
* "Any sufficiently complicated C or Fortran program contains an ad hoc, informally-specified, bug-ridden, slow implementation of half of Common Lisp." Greenspun's Tenth Rule
===
* Idea: formally relate my denotational and operational models of events.  Formulate and test properties and equivalence via QuickCheck.
* Working on my new Eros talk.  New material: ++++
*  Usability and composability (applications and software libraries)
*  The Unix philosophy, according to Doug McIlroy.
*  Functional programming as modernized & improved realization of Unix's "principle of composition".
*  ...
*  What's functional programming?
=== Here's a possible new title: "Tangible functional programming: a modern marriage of usability and composability" or "Tangible values: usable and composable".  Most programmers probably think of "values" as much less expressive than I mean.
* The Unix solution applies one interface style for use and composition.
* "As commonly practiced, programming has a strong bias toward the left-brain creative mode, in that it's abstract, linguistic, and usually sequential.  (The last point applies much less to functional programming.)  Computation is an extremely powerful medium of communication, and the left-brain bias means that only some people have access to that medium."
* Back from the SF Bay Area, where I spoke at Google (Wed), Intel (Thu), and Apple (Fri).  My talks were well-received.  I could see clear, motivated interest better at Intel & Apple (I&A) than at Google.  Perhaps the difference had to do with having inside connections at I&A, so that my visit was more specifically motivated.  Also, I&A make computers, so they're concerned about parallel programming.  I also got a lot of stimulation from the conversations at I&A.  I came away much more interested in ongoing working relationships.
* Leaf P and I played with an example of imperative code for a text-based interaction.  In rewriting it to functional form ("genuinely" functional, not IO), I tried something close to the pre-monad, stream-function style.  It was awkward.  An approach based on functional events could be simpler and more modular.  I don't remember the example precisely, so here's a variant, translated to Haskell with IO: ++++
\begin{code}
squares :: IO ()
squares = do putStr "next number: "
putStrLn $"square: " ++ show (n*n) when (n < 100) squares \end{code} === One functional variation would have type @[String] -> [String]@, very like the pre-monad IO style. The output mixes prompts and squares. The connections between (a) input numbers & squares and (b) prompt and entered numbers are not obvious. So how about an FRP approach? This example is all discrete, so use only events. There's are output events for the the prompts and squares and one input event for the next entered number. The prompt is always the same, so doesn't really carry any information. The type is thus @Num a => Event a -> (Event (), Event a)@. Note some substantial improvements implied by this type: ++++ * Modularity: any number type and any prompt or output labeling may be chosen later. * Since the semantics of events includes exact timing info, an event-based formulation will have well-defined timing semantics. * Since the type doesn't involve IO explicitly, nor strings, it is composable. * Similarly, input & output can work in a variety of settings besides terminal IO, including GUIs and network transport. === Idea: use variants of list functions wherever possible for manipulating events, except a different @Monoid@ instance (merge rather than append). Here's a new version. ++++ \begin{code} squares :: Num n => Event n -> (Event (), Event n) squares num = (prompt, sq) where -- Prompt initially and while the request is < 100 prompt = start mappend forget (takeWhile (< 100) num') -- Numbers considered (and prompted for) num' = takeUntil (not . (< 100)) num -- Squares of those numbers sq = fmap (^2) num' -- Initialization event. Occurs once. start :: Event () forget :: Functor f => f a -> f () forget = fmap (const ()) \end{code} === I'm not content with this code yet, mainly because of the repeated |(< 100)| test. * Here's another version of @squares@, using a boolean behavior (@running@): ++++ \begin{code} squares :: forall n. 
Num n => Event n -> (Event (), Event n) squares num = (prompt, sq) where ok = (< 100) numOk = fmap ok num okNum = num suchThat ok prompt = start mappend (forget numOk whenE running) running = True stepper numOk sq = fmap (^2) okNum \end{code} === Types: ++++ \begin{code} ok :: n -> Bool numOk :: Event Bool okNum :: Event n prompt :: Event () sq :: Event n \end{code} === * Here's a different tack: use single-occurrence events that yield new events, like what's described for reactive behaviors on [[2007-10-10]]. Unfold a succession of events. Each outgoing prompt event defines a single corresponding read event. Each input event defines a corresponding write event and //possibly// a prompt event. The first time @n < 100@ fails, there will be no further prompt and so no further read or write. Try it out * Idea: The type @Occ a@ refers to a single occurrence, including the time. Semantically, @Occ a = (Time,a)@, but we can't extract generally a precise time value before the event occurs. The semantics of @Event a@ is @[Occ a]@, subject to the constraint that the occurrence times are monotonically non-decreasing. One tool for making events: ++++ \begin{code} unfoldE :: (b -> Occ (a,b)) -> b -> Event a \end{code} === Note the type @unfoldr :: forall b a. (b -> Maybe (a, b)) -> b -> [a]@. Can I use @unfoldr@ directly? It's so close, especially if @Event a = [Occ a]@. I'd need a @(b -> Maybe (Occ a, b)@. * On my trip, I got more clear about my quest of eliminating all IO in Haskell programming, and the relationship of that quest to composability. Make composition the central issue. Explicit IO interferes with that goal. Make sure that all IO is completely separated from functionality in an "interface wrapper" that can be altered and/or removed during composition. In conventional programming style, interface and functionality are entangled in code, so composability is lost. 
* Article: [[Computers, Networks and Education| http://www.squeakland.org/school/HTML/sci_amer_article/sci_amer_01.html]], by Alan Kay. I love Alan's high-level vision and orientation. And yet, his systems inflict sequentiality and discreteness (in time and space). * Note about equality and fairness to NVC-cert group. +++> For me, the notions of equality and fairness are neither strategies nor needs. They lack the properties that make strategies and needs useful. Strategies specify an action. Needs are directly connected to surviving and thriving and are universal. Instead, notions like equality and fairness (and most uses of respect and consideration) are closer to externally-rooted moralisms, which I understand as an attempt to survive and thrive through coercion. For instance, if I have no food and you have plenty, I might think in terms of fairness, but really what I want is food. Similarly, if I don't have the right to vote and you do, I might think I want equality, but really I want to survive and thrive, and I'm more likely to if I have some influence on policy. I've listed many other such popular need-words at http://evolve.awakeningcompassion.com/?p=18 , suggesting that they're typically used as "vague demands". I don't know how to measure the depth of a need. For me, some ways of thinking (and associated words) lead my heart to openness, my mind to clarity, and the path to joy & fulfillment more illuminated. Other ways of thinking leave my heart untouched or constricted, my mind confused, and the joyful path narrowed. Ideas of equality & fairness are in the latter camp. They lead me away from living, direct connection with myself or another person, to moralisms and external standards. On reflection, I don't have any notion of equality or fairness that makes sense to me. I'd like to hear if anyone else has one that makes sense to them. === * Another response on "consideration". 
+++> Hi John, I often hear the term "consideration" as a need from people learning or teaching NVC, and always it comes across as a "vague demand" rather than as Life-rooted. Mainly, I hear it as NVC-speak for "You should focus on what I want you to", or "You're too self-centered". If you respond to your significant other's expression of need for "consideration" by guessing other needs, she may hear (and perhaps accurately) "you're doing NVC wrong". You might first share some honesty, such as "I'd like to get more connected and inspired here. Could we explore some more needs and see if that helps me?" === * Another one, on "racism". +++> I haven't been satisfied with any of the suggestions for delving into the beautiful, life-serving core of racism. Nor have I been inspired to much clarity and connection with the question myself. Maybe the reason is that the discussion and my own thinking have kept racism at a distance -- in other words seeing it as something going on in other people but not ourselves. Perhaps getting to the lovely heart of racism will require owning and sharing our own racism. Here's a go for myself. This summer I visited my son Jake in Chicago. He was living in a predominantly poor neighborhood. I was on edge for the first day or two, worrying about my car getting broken into or getting mugged at night. And I noticed that almost everyone in the neighborhood looked Mexican. So what can I say about my own fear and negative expectations in that setting? I guess I imagine these people struggling for survival while seeing others like myself with much easier lives. I imagine that after a while, out of desperation, the people struggling give up on the dream of being cared for and supported by society and decide to balance matters by taking care of themselves at others' expense. 
So, perhaps racism (classism really, but class & race are correlated) in me is a fear borne of the current out-of-balance society, in which my group's needs get taken care of much better than some others. Specifically, from fear that a shift toward balance will come at my group's expense. Going deeper, I notice that I really enjoy giving to those more on the survival edge than I am. So perhaps my fear is really that I'll be taken from, without choice in the matter. My request is for you to search yourself for racism and share what you find. === * Abstract for paper on Dense work: ++++> Although functional programming languages support programming with functions as first class values, they also penalize the programmer for doing so. Lazy evaluation caches components of //data structures// but not functions. One partial solution is automatic memoization of functions, but the memo table is a one-size-fits-all data representation that lacks the efficiency of more specifically targeted representations. Moreover, memoization is not very useful for functions over continuous domains, in which exact cache hits are rare. This paper proposes another approach: a countably infinite data structure representing a function by a //dense// collection of samples. Sampling yields an infinite stream of successive approximations instead of a single precise value. Where the sampled function is continuous, the stream converges to the function's value at the sample point. More powerfully, one can sample over an //interval//, yielding an approximation stream converging to the average value over that interval. This interval interface is particularly useful for rendering images (e.g., from 2D or 3D models) and animations into discrete space and time samples (pixel arrays and frame sequences). For these uses, interval-based sampling yields visually smooth, anti-aliased results, including motion blur. 
In interactive settings, with bounded computation time based on frame rate and machine load, approximation streams enable a simple means of progressive refinement. An initial rough approximation comes up quickly and is refined to smoother and more accurate versions as time allows. Moreover, the representation shares a great deal of work between anti-aliasing views and zoomed-in views, as well as between an image and slightly moved or scaled variations, as is common in interactive exploration. === Possible working title: "Lazy continuous functions". Not entirely accurate, since it applies to non-continuous functions over continuous domains, but makes stronger convergence guarantees about continuous functions. * More Dense ideas: ++++ * Progressive accuracy in computation, e.g., fixnum, floats, doubles, bigfloats. * Use progressively more terms from a Taylor series. * Relax the sampling pattern. Given an interval //I//, compute (an approximation of) //f(x)// for some //x// in //I//, but not necessarily for the midpoint of //I//. * What properties are required of //f// to insure that the interval approximation stream converges to the average value? For instance, //f// could yield 0 for rational (or algebraic) numbers and 1 for irrational (or transcendental). In my implementation, every sampling would give 0, although the function is equal to 1 almost everywhere. === * What's a simple, efficient data structure for an infinite 1D array, i.e., quick index-based access, but sequential access is not important. One idea: an infinite binary tree. How to convert an index to a path in the tree? Use the bits from the number's binary representation. Except for zero, we can assume that every number has 1 as its msb. So to encode a natural number n, form the sequence of bits of n+1, drop the msb, and use the result as the tree path. Let @bits@ computes these bits, starting with the least significant and dropping the msb 1. 
Examples: @path 0 == bits 1 == []@, @path 1 == bits 2 == [0]@, @path 2 == bits 3 == [1]@, @path 3 == bits 4 == [0,0]@, @path 4 == bits 5 == [1,0]@. The tree fills up a level at a time, as the inverse of a breadth-first enumeration. Is index-based sampling really enough? I also want to build these indexable streams (infinite arrays) inductively. Specifically, each interval in my Dense structure will have an infinite array of approximation streams of regular arrays. The //i//th stream contains 2^^//i//^^-element arrays. First build a regular inductive stream and then convert to an indexable stream. How to convert? Lots of [[discussion on #haskell| http://tunes.org/~nef/logs/haskell/07.11.12]], starting at 19:45:11. * See chapter 9 of Chris Okasaki's "Purely Functional Data Structures" for related "numerical representations". * Dense: ++++ * Instead of a single 2D array per view, make a bunch of them, all with power-of-two sizes. Nice match for efficient (required?) power-of-two sizes of OpenGL texture maps. Simplifies my implementation work as well. Come up with a scheme for managing texture memory. Look at computing the textures concurrently. * Besides power-of-two size, have the starting position be a multiple of the size (as in my @Data.PInv@ module). * Here's an idea for exploiting symmetry. Change back to the simpler sampling pattern that only covers positive domain values. Use a two-halved representation: //f x// and //f (-x)// for positive //x//. Treat symmetric functions specially, by sharing a single zipper. I guess I'd have to have a flag in there in order to preserve sharing. Or an alternative constructor with only one zipper instead of two. For instance, define @abs@ with explicit sharing and then @fmap@ functions over it. Make sure that @fmap@ and friends (@<*>@) preserve the sharing. Maybe this trick can be generalized to handle other simple relationships to avoid redundant computation. 
* The progressive approximation arrays for a given image patch (//2^^n^^// by //2^^n^^//) don't have to have all the same size. I could start with 1-by-1 and continue to subdivide until I get to roughly screen resolution (rounded down to a power of two), and then refine quality at that level. Probably not worth doing, however. === * Art Mills fixed the spring today. Both the overflow pipe, which Joseph & I worked on, and the outflow pipe at the bottom of the cistern were blocked. The overflow pipe is only about 8 inches long and was jammed into the earth. I imagine it used to be exposed. I think Art added some length so it won't get covered up again. The outflow pipe at the bottom of the cistern was also plugged up. Art had to cut that pipe in order to get it cleared out, which he accomplished. Water is now flowing freely. I'll check the tank level in the morning. The next steps Art recommended are to (a) clean the cistern well so it doesn't reclog (much easier now that it drains continuously) and (b) replace the rotten framing of the cistern door. He left the pipes uncovered, so we'll be able to see how it's all put together. * Reading up on OpenGL. * Adding interactive pan & zoom to the Eros image viewer. Panning was easy. For zooming, two challenges arise: ++++ * Distinguish between drag and shift-drag. * When zooming, I think I want to zoom around the mouse-down point. That constraint makes for a somewhat tricky reactive interaction. I've implemented this interaction before, including in Pan and in the two-handed navigation Fran app. I'm uncomfortable about going further with the reactivity algebra I have set up. === * [[Ontology is Overrated: Categories, Links, and Tags| http://www.shirky.com/writings/ontology_overrated.html]], by Clay Shirky. Quotes: ++++ > One of the problem users have with categories is that when we do head-to-head tests -- we describe something and then we ask users to guess how we described it -- there's a very poor match. 
> The reason we know SUVs are a light truck instead of a car is that the Government says they're a light truck. This is voodoo categorization, where acting on the model changes the world [...]. > It comes down ultimately to a question of philosophy. Does the world make sense or do we make sense of the world? If you believe the world makes sense, then anyone who tries to make sense of the world differently than you is presenting you with a situation that needs to be reconciled formally, because if you get it wrong, you're getting it wrong about the real world. === * Thinking yet again about reactivity and interactivity. I can probably get rid of the IO in DataDriven and Phooey if I go to an arrow-friendly, Yampa-like model. Remember the principle of //internal// reactivity (from [[2007-10-12]]). (Also revisit the single-occurrence model of events.) How does the mouse and keyboard get converted into model sensory info? There are probably two answers: an outer one and an inner one. The outer answer says how to package up the behavior with an input signal to create an output signal (using Yampa terms). The inner answer says how to alter the input compositionally. * Implemented panning & zooming in my Eros image viewer. Came out elegantly. * How will composition work in my dense representation? Compose functions or compose dense samplers? I could keep the function together with the dense sampler or discard the function. Idea: discard the function but have rewrite rules that composes the functions. ++++ \begin{code} dense :: (Double -> a) -> Dense a {-# RULES "dense <*>" forall ff af . dense ff <*> dense af = dense (ff <*> af) #-} \end{code} === What will ghc's rewriter do when a @dense@ application gets used more than once? Find out. * Hm. When defining @<*>@ on @Dense@, I'd like to encode shape-matching in the types. This issue comes up for the @BinTree@ context (@Ctx@), which records whether a @BinTree@ is the left or right child. 
Also with the resolution streams, in which the arrays have matching resolution. * Recall Chris Okasaki's [[From fast exponentiation to square matrices: an adventure in types| http://www.citeulike.org/user/conal/article/1857246]]. He used a nested type for square matrices of size //2^^n^^// and then extended to general square matrices by encoding the fast exponentiation algorithm into types. Try to do something similar. Start with my resolution streams. Currently: ++++ \begin{code} -- nth element has size 2^n type ResStream a = Stream (Array Int a) \end{code} === Here's a variation that statically enforces the size constraint: ++++ \begin{code} data ResStream a = RS a (ResStream (a,a)) \end{code} === * Equivalently, here are binary trees using nested types: ++++ \begin{code} data Pair a = Pair a a instance Functor Pair where fmap f (Pair a b) = Pair (f a) (f b) instance Applicative Pair where pure a = Pair a a Pair f g <*> Pair a b = Pair (f a) (g b) data BinTree a = BinTree a (BinTree (Pair a)) instance Functor BinTree where fmap f (BinTree a p) = BinTree (f a) ((fmap.fmap) f p) instance Applicative BinTree where pure a = t where t = BinTree a ((pure.pure) a) BinTree f fp <*> BinTree x fx = BinTree (f x) (liftA2 (<*>) fp fx) \end{code} === There's a reference to a similar type in [[Perfect trees and bit-reversal permutations| http://www.citeulike.org/user/conal/article/1925076]]. See also [[Numerical Representations as Higher-Order Nested Types| http://www.citeulike.org/user/conal/article/1925085]]. Both by Ralf Hinze. * Imitating generic tries, define a type family for dense function representations. Both are efficient data representations of functions. Is there a common generalization? * Abstract the pairing out of binary trees. 
Top-down and bottom-up: ++++ \begin{code} data TreeT f a = TreeT a (f (TreeT f a)) data TreeB f a = TreeB a (TreeB f (f a)) \end{code} === Binary trees: ++++ \begin{code} type BinTreeB = TreeB Pair type BinTreeT = TreeT Pair \end{code} === The @Functor@ and @Applicative@ instances are exactly the same for @TreeT@ and @TreeB@. For instance, ++++ \begin{code} instance Functor f => Functor (TreeT f) where fmap h (TreeT a p) = TreeT (h a) ((fmap.fmap) h p) instance Applicative f => Applicative (TreeT f) where pure a = t where t = TreeT a ((pure.pure) a) TreeT f fp <*> TreeT x xp = TreeT (f x) (liftA2 (<*>) fp xp) \end{code} === See [[De Bruijn Notation as a Nested Datatype| http://www.citeulike.org/user/conal/article/1930769]]. We can also simplify the instances by using explicit type composition. ++++ \begin{code} data TreeT' f a = TreeT' a ((f O TreeT' f) a) instance Functor f => Functor (TreeT' f) where fmap h (TreeT' a p) = TreeT' (h a) (fmap h p) instance Applicative f => Applicative (TreeT' f) where pure a = t where t = TreeT' a (pure a) TreeT' f fp <*> TreeT' x xp = TreeT' (f x) (fp <*> xp) \end{code} === Similarly for @TreeB'@. We could do many more variations: ++++ \begin{code} -- Rose trees: top-down & bottom-up type RoseT = TreeT' [] type RoseB = TreeB' [] -- Possibly finite, binary trees: type FinBinT = TreeT' (Maybe O Pair) type FinBinB = TreeB' (Maybe O Pair) \end{code} === * Goal: make a version of bottom-up binary trees with low-level arrays representing the levels, in exactly the format that OpenGL can consume for efficient texture generation. Idea: use the @binary@ library. Another idea: type families. * See Chris Okasaki's [[Purely Functional Random-Access Lists| http://citeseer.ist.psu.edu/okasaki95purely.html]] and Ralf Hinze's [[Bootstrapping One-sided Flexible Arrays| http://citeseer.ist.psu.edu/hinze02bootstrapping.html]]. Maybe I could use one of them as the core piece of my dense representation. 
* Remember sjanssen's paste: [[a lazy trie| http://hpaste.org/3839]]. * Reply to Leaf P on separating IO and pure computation: +++ Here's a re-rendering in Haskell, to make the pure-vs-IO distinction clearer. I've generalized a bit, to work on any readable input type and any showable output type. \begin{code} computeIO :: (Read i, Show o) => (i->o) -> (i -> Bool) -> IO () computeIO f done = loop where loop = do putStr "next value: " i <- fmap read getLine putStrLn$ "result: " ++ show (f i)
unless (done i) loop
\end{code}
I'd like to get at an essence behind the imperative code, that is more composable, has no accidental sequentiality, and from which the imperative code can be derived as a sort of compilation or trace.

A functional event/stream model seems promising to me.  I suggested one on the whiteboard, namely a function from one lazy list to another, but neither of us was really satisfied.  A more composable approach would perhaps be to add timing info to the stream model, yielding functional events (as in functional reactive programming).  The user produces an event stream of integers, and the code produces two event streams: ready (prompt), and a computed square.  Thus the type:
\begin{code}
computeE :: (i->o) -> (i -> Bool) -> EStream i -> (EStream (), EStream o)
\end{code}
Semantically, an EStream is just a stream of time-stamped values.

Note that this compute interface is substantially more composable than the imperative version:
* The prompt and output labeling are factored out and may be chosen later.
* Because we're using values (i and o) instead of human-oriented encodings (strings), we can compose conveniently and efficiently with other producers & consumers.
* Because we've eliminated IO, these compositions can be fused (deforested), eliminating intermediate streams.
* For the same reason, input & output can work in a variety of settings besides terminal IO, including GUIs and network transport.

There's also an isomorphic type that joins the prompt & compute event streams into a single sum-valued stream:
\begin{code}
computeE' :: (i->o) -> (i -> Bool) -> EStream i -> EStream (Either () o)
\end{code}
which is suggestive of a high-level abstraction of stream processors:
\begin{code}
computeSP :: (i->o) -> (i -> Bool) -> StreamP i (Either () o)
\end{code}
* newsham pointed me to [[Rc -- The Plan 9 Shell| http://plan9.bell-labs.com/sys/doc/rc.pdf]] and [[Reaktor| http://en.wikipedia.org/wiki/Reaktor]] as related.
* Installing Ubuntu on my old laptop.  Misc notes: ++++
*  My wireless card is recognized but isn't connecting to my wap
*  Installing emacs.  There are several choices, including "emacs22" and "emacs22-gtk".  Trying the first one ("sudo apt-get install emacs").  Try the second one later.
*  [[Ubuntu 7.10 (Gutsy Gibbon) guide (wiki)| http://ubuntuguide.org/wiki/Ubuntu:Gutsy]]
*  Edited my list of repositories, via the [[manual method| http://ubuntuguide.org/wiki/Ubuntu:Gutsy#Manual_Method]].  Now I see I could have used the [[Menu Method| http://ubuntuguide.org/wiki/Ubuntu:Gutsy#Menu_Method]].
*  Yoiks!  My satellite bandwidth monitor shows I slurped 507MB today.  The graph looks like I'm over my 7-day 1GB limit, which is punishable by dial-up speed.  The rolling summary says I've used only 481MB for the last 7 days.  Hm.  I really have to watch it.
===
* I posted note for my technical blog about the techtalk, and dons [[posted it to prog.reddit| http://programming.reddit.com/goto?id=6135x]].
* OpenGL: ++++
*  Found [[this note about texture caching| http://www.geocities.com/SiliconValley/Park/5625/opengl]]: "in version 1.1 and higher of the OpenGL spec, we can use a method for creating texture objects, which in most good drivers, should be cached and uploaded/downloaded from card memory automatically. The days of manual texture caching are over."
===
* Performance study (-ddump-simpl) & tuning on my OpenGL-based Pan-like code.  I eliminated boxing of number parameters.  Still had lots of allocation, which turned out to be from the "floor" function.  I defined my own, using truncate, which rewrites to a primitive for Float->Int, and I got a ghc panic.  Narrowed down the problem and reported as ghc bug ticket #1916.

* Starband smack-down!  While getting Ubuntu updates and watching my Google tech talk, I went 500MB over my 1GB 7-day rolling download limit.  Now I'm dropped to 100Kbps (from 512Kbps max) until I get down to 750MB, which will be three or four days if I'm very frugal.  I tried calling to beg, but they're closed for Thanksgiving.
* When I have more bandwidth, check out [[Don't fear the Monads! (functions, composition, monoids, monads)| http://programming.reddit.com/info/617p6]].
* GHC bug #1916 (see [[2007-11-21]]) got fixed quickly.
* I called Starband tech support about having gone over my limit and getting bounced down to 100kbps.  I talked with Mario, explained best I knew what had happened, and he reset it for me.  He also told me about down2home, made by [[JITServ| http://jitserv.coolfreepage.com]], which monitors and presents data on download & upload.  Will be very helpful.
Related problem: calculate a list of the averages so far. Now make sure it works for infinite lists. You'll probably want a one-pass algorithm. :)

Extra twist: assume fairly low precision arithmetic, and keep the computed numbers all within the range of sizes of the values seen, to avoid overflow or accuracy loss. Particularly, make sure that if the input numbers are all non-negative, then the auxiliary values are also.

These requirements come up in graphics computations, where pixel values are often represented as three or four 8-bit fixed point, non-negative numbers (24-bit color with optional transparency). Anti-aliasing can be done by sampling the underlying continuous image at several points within a pixel and averaging the results. By calculating a stream of averages-so-far, the results can be shown as they come available. That way, rough results come up quickly and then get nicely smoothed as you watch. For example, see [[Pajama| http://conal.net/Pajama]].
===
* [[Postmaster ESMTP Server| http://postmaster.cryp.to]]: a super-configurable mail server in Haskell.
* Progress with OpenGL.  I'm very happy with the pixel sampling loops.  With the image @const True@, I get 100 Mpix/sec.  With @rings@, I get 2 Mpix/sec.  It does square roots and allocates, due to @floor@.  I'll stop using @floor@ when I get a fixed GHC that doesn't panic on @truncate@ with optimization.
* Dense:  ++++
*  I don't know if I can get good performance if I start with single-pixel arrays and recursively combine into larger.  Try something very simple: use textures of a fixed array size, say 128x128 or 256x256.  For each sampling resolution, have an @IntMap@ (Patricia tree) of @IntMap@s of approximation streams of blocks.
*  Patricia trees may well be a sub-optimal data structure, considering how restrained an interface I use.  I think I have a sort of comprehension (@:: (Int -> a) -> IntMap a@) and the inverse (@:: IntMap a -> (Int -> a)@).  Hm.  Really, I use the composition of these two, which is just an @Int -> a@ memoizer, @intMemo :: (Int -> a) -> (Int -> a)@.  I could even extend to @Integer@ or something polymorphic.  I could even use the generic @memo@ or a trie.
*  For basic sampling, make a function that takes an image and renders a unit-square section of it (from (0,0) to (1,1)) at a fixed resolution.  For the different sections and resolutions, just scale and translate the image and pass into the unit sampler.  Make sure that samplings never use the same points, so that each level contributes new information for anti-aliasing.  How?  Sample at the centers of the squares within the unit square.
===
* [[Efficient tree data structure for maps from Int to somewhere| http://haskell.org/haskellwiki/Memoization]].  Infinite binary tree indexed by little-endian binary representation.
* [[Moebius Transformations Revealed| http://www.youtube.com/watch?v=JX3VmDgiFnY]].  Lovely!  And note the [[music source| http://www.musopen.com]].
* Here's a simpler way to think about my Dense representation: write as a recursive function, and then memoize (@memoFix@).  The function takes an integer size exponent //r//, integer coordinates of a square //(i,j)// (with lower-left corner //(2^^r^^*i,2^^r^^*j)//), and samples a given function //f//.  I'd been thinking of my approach as an alternative to memoization for functions of continuous domains.  I like that it can work via memoization over a related discrete domain.
* Oops: there's a conflict in my current plan.  I want to compute largish blocks at a time (say 128x128), //and// i want each new sampling for the approximation stream to take about the same time to produce.  My previous plan had been to produce sample blocks of size //2^^r^^// square for //r = 0, 1, ...//, for each sub-square.  The //(r+1)^^th^^// block comes from combining the //r^^th^^// block for each of the four sub-squares.  In this scheme, only the //0^^th^^// blocks, with only one sample each, are sampled from the function itself.  On the other hand, it does give me a regular succession of resamplings for anti-aliasing.
* Here's an idea for resolving this dilemma.  Stick with a fixed block size, but reduce the block size to something like //32x32// pixels.  Small enough that we needn't bother avoiding computing some extra.  Oh.  Doesn't really solve the problem.  With a fixed block size, I don't know how to get the gradual anti-aliasing //and// share computation with the subdivided squares.
* Besides Dense, what other ideas/projects do I have going? ++++
*  Alternative to IO.
*  FRP with modular interaction
*  Data-driven without IO (probably related to previous)
*  FRP via threads & blocking (RNF)
*  Eros: ++++
*   How to animate fusion?
*   Code generation
*   Polymorphism
*   Recursion
*   Editing
===
===
* A new idea for my Dense dilemma.  Compute my samplings differently, depending on whether inspection happens bottom-up or top-down (i.e., whether we render a square before or after its sub-squares).  Note that the second through fifth samplings for a square are closely related to the first samplings for each of the four sub-squares.  They're rearrangements of each other.  The problem is that I don't know which one to compute directly and which indirectly, in terms of the other.  If I'm working top-down, I'll want to compute samplings 2-5 and rearrange them into samplings of the four sub-squares.  That way, I get intermediate results at regular intervals for the square being rendered.  Each of four sub-squares will get its first sample very quickly, by rearrangement.  On the other hand, if I'm working bottom-up, I'll want to compute just the visible squares, not all squares in the parent, so compute samplings 2-5 and rearrange them into samplings of the four sub-squares.  Now here's the idea.  Define an encapsulation that holds two lazy values, each described (a) directly, and (b) cheaply as a function of the other.  Internally, whichever value is accessed first is computed directly, and the other indirectly. ++++
\begin{code}
vp :: a -> b -> (b->a) -> (a->b) -> (a,b)
vp a b af bf = unsafePerformIO $mdo refa <- mk a b af bf refa refb refb <- mk b a bf af refb refa return ( unsafeRead refa , unsafeRead refb ) where unsafeRead = unsafePerformIO . readIORef mk :: c -> d -> (d->c) -> (c->d) -> IORef c -> IORef d -> IO (IORef c) mk c d cf df refc refd = newIORef$ unsafePerformIO $do writeIORef refc c writeIORef refd (df c) return c \end{code} === Alternatively, I could do a purely functional implementation with a different interface. Make an opaque pairing, and have the two accessors return a new pairing as well as value. * [[Shape Modeling and Computer Graphics with Real Functions| http://www.hyperfun.org/F-rep.html]] * When people adopt a new programming languages, it's usually because either (a) the language fits their inclinations, or (b) the language works well for an app they care about. What would the latter be for NVC? We say it's joyful giving and deep needs met fully. What's a more concrete app or a more concrete expression of that app? * [[Plugging a Space Leak with an Arrow| http://www.cs.yale.edu/~hl293/download/leak.pdf]] * Here's another angle on my Dense puzzle. ++++ Each 2D sampling array can be built in a five different methods: ++++ * Directly sample the function * Stick together four lower-resolution arrays for the child intervals * Extract one quarter of a higher-resolution sampling of the parent interval * Weave together four lower-resolution, offset arrays for the given interval * Extract one quarter of the unweavings of a higher-resolution sampling of the given interval. === Assign a dynamic cost estimate to each method, in a data-dependency network (e.g., using [[DataDriven| http://haskell.org/haskellwiki/DataDriven]]). The cost of one of these multi-way values is (dynamically) the minimum of of the costs of the methods, plus a bit for overhead. Extract values greedily, i.e., use the current minimum-cost method. 
When a value gets computed, the multi-way cost changes to zero (or a small epsilon), resulting in recomputation of all related costs. Some questions: ++++ * Does cost refer to the cost of reducing to WHNF? * How to combine these costed values? Can I use @Applicative@? Assuming a strictness, the cost of @f a@ is the sum of the costs of @f@ and @a@, and the cost of application. The latter cost would depend on some static information about @f@ and @a@. In particular, array size determines the cost of splitting & combining, and influences the cost of sampling. * For sampling, the function being sampled has a bounded cost per sample, and the overall cost is the single-sample cost times the size of the array, plus some overhead. We might measure the per-sample cost as we go, or construct the cost while constructing the function itself. * Maybe I could use the @DeepArrow@ algebra to build up costs, along with functionality, types, code, UI, etc. Importantly, the cost of @dup@ is nearly zero. * How to even talk about === Maybe also manage memory costs of keeping values in computed form. === * Try something simple for now with my images: a memoized function from resolution and region index to a single texture block. That much is already better than what I have in Eros. I can even do some anti-aliasing simply by choosing bigger textures than necessary and shrinking them down. * OpenGL manages texture memory automatically, so I don't have to. See OpenGL "red book", chapter 9. +++> If texture objects have equal priority, typical implementations of OpenGL apply a least recently used (LRU) strategy to decide which texture objects to move out of the working set. If you know that your OpenGL implementation has this behavior, then having equal priorities for all texture objects creates a reasonable LRU system for reallocating texture resources. === * How to render from sampled chunks: ++++ * Given: cumulative scale & translation, window width & height (in pixels). 
* Return: a composite rendering action, combining texture & and quads. * Use direct coordinates everywhere. No transformations. Except, perhaps, for moving the origin to the center of the window. * How to enumerate the relevant 2D intervals? === * Starting a new Haskell project: "Piq". It's functional images yet again. I want it to be as fast as possible while using GHC compilation and my new approach to infinite, continuous images. ++++ * Description: //Piq// is a yet another take on continuous, infinite images, in the spirit of [[Pan| http://conal.net/Pan]]. Unlike Pan, Piq (a) relies on a Haskell compiler for optimized compilation, and (b) uses a lazy infinite /data/ representation, in order share lots of work between views. * Use @Complex Float@ rather than @(R,R)@ or @Vector@. Play with the many operations that come along with @Complex@, particularly the numeric instances. @Complex@ keeps its components unboxed. * Yum -- using @Complex@ works out //very// nicely in simplifying basic definitions. ++++ \begin{code} -- Warp an image by /the inverse/ of a warp warpIm :: Warp -> Filter a warpIm w = inImage (. w) translate :: C -> Filter a translate d = warpIm (subtract d) uscale :: R -> Filter a uscale s = warpIm (^/ s) rotate :: R -> Filter a rotate theta = warpIm (cis (-theta) *) \end{code} === Oh! Better yet: ++++ \begin{code} rotC :: R -> C -> C rotC theta = (cis theta *) translate = warpIm . (+) . negate uscale = warpIm . (*^) . recip rotate = warpIm . rotC . negate \end{code} === Hah! Lovely! Other parameterized spatial filters can follow the same formula. Hm. Maybe there's a nice abstraction to be teased out. === * Haskell programming tip: use {{{ghc --supported-languages}}} to get the list of supported {{{LANGUAGE}}} pragma options. * Note on needs: +++> Hi Matthew, Thanks for sharing your experience with us. I'm especially touched with the exchange in the section your called "Dancing between empathy and expression". 
Of the four needs you listed, I connect with and believe safety. The others I hear more as in the realm of strategies or even "vague demands". (Please see "Distinguishing needs from vague demands" and related posts at the "NVC Evolves" blog -- http://evolve.awakeningcompassion.com/ .) I suspect that the others ("to be heard", "understanding", and "Trust") all are your strategies to address safety and perhaps some other of your needs not listed. I wonder if "to be heard" and "trust" are really NVC-sounding ways to avoid saying you wanted prompt obedience, and I wonder if you might like prompt obedience in order to serve your needs for safety, with enough of your energy and focus left over to meet your needs for making life wonderful for yourself and those around you. About "trust" in particular, I'm guessing you mean what I call "trust that", as opposed to "Trust". The former is a conditional strategy, while the latter is unconditional state of presence & peace. Please see "Trust vs Trust that ..." and "Trust that ..." in the NVC Evolves blog for more description. I'd love to hear back from you (Matthew) and others. For instance, do you get more easily into a state of clear mind and open heart with the "safety" issue, and more of a muddle and a constriction with the other three (as do I)? Do you find that there were some old authority/domination habits masqueraded by NVC language? Warmly & gratefully, - Conal === * Hm -- maybe move my @Image@ type from namespace @Graphics@ to @Data@. After all, I want people to think of images as values. Really -- everything could go into @Data@ for this same reason. Maybe save it for things with simple semantics. * From the GHC.Float.hs: ++++ \begin{code} instance RealFrac Float where ... floor x = case properFraction x of (n,r) -> if r < 0.0 then n - 1 else n \end{code} === This code is not unboxing-friendly, as it uses @properFraction@, which returns a pair of numbers. Even if the pair itself gets unboxed, its elements don't. 
How about this code instead: ++++ \begin{code} instance RealFrac Float where ... floor x = if x < 0 then n else n-1 where n = truncate x \end{code} === Likewise for @ceiling@. I'm new at Haskell optimizing, so I might be missing something subtle. Although @truncate@ is also defined via @properFraction@, there's a rule that replaces the definition: ++++ \begin{code} {-# RULES "truncate/Float->Int" truncate = float2Int #-} \end{code} === * Lots of discussion [[today on #haskell| http://tunes.org/~nef/logs/haskell/07.11.28]]: using DataDriven (extended to an language of evaluation policies) for managing software (9:36); simplicity & generality in design (10:43); pedagogy & "the IO monad" (20:43). * Reply to sigfpe's post [[ The IO Monad for People who Simply Don't Care| http://sigfpe.blogspot.com/2007/11/io-monad-for-people-who-simply-dont.html]]: ++++> Hm. I think the "command/expression distinction" is a confusion of semantics & syntax. I'd say that Haskell "expressions" are used consistently to denote a wide variety of (immutable) values, from booleans & numbers to streams & trees to imagery & animation, to "commands" (aka "computations", "stateful computations", "actions", "commands", IO values) & beyond. All are denoted (expressed) via expressions, and all are immutable values. The only essential difference between IO and, say, list is that the semantic interpretation we give IO is much more complicated (intractably so) than the one we give lists. I also have a quibble with the common practice of referring to IO as "the IO monad". Monadness is no more essential to the IO type (constructor) than to list. Bringing in monads is just a convenient structuring technique. The idea of capturing imperative computations in a type of (immutable) values is lovely. And so is the general pattern we call "monad". I'm worried that people won't understand that these lovely ideas can be used separately, and that (like infinite lists), they happen to be usable together. 
=== * Added fractional (@RealFrac@) sliders to Phooey & TV. * Very long #oasis chat with vincenz and rici. Saved. One bit from me: ++++> For me (what I care about), syntax and implementation are always in service of semantics. I form expressions in order to denote, and I want implementations so i can get a more direct glimpse of the denotations. === And here's a story from rici:+++> there's a famous canadian story about a baseball tournament between various original nations of ontario (i.e. indigenous groups). now, some of these are traditionally hunter-gatherers, and others are traditionally pastoralists. there's a big difference between the social norm with respect to dinner which has to do with the fact that agricultural products are stored. now, in some cultures the host is //obliged// to put more food on the table than the guests wish to consume, in order to demonstrate that arbitrary consumption is invited. in other cultures, the guest is //obliged// to eat everything which is presented, in order to demonstrate that it was good. clearly, these two cultural values are incompatible. so the night before the tournament, the hosts (of persuasion A) threw a dinner party for the visitors (of persuasion B). consequently, the visitors were required to gorge themselves on an abundance of food which stretched the resources of the hosts. the next day, the visitors got clobbered, since they were all suffering from overconsumption, and they took it up with the tournament organizers, claiming that they had been, in effect, poisoned. meanwhile, the hosts had complained to the tournament organizers, claiming that they had been put out of pocket by the massive greed of the visitors. now, there is no right or wrong to any of this. everyone did what came naturally to them, and it was only a problem because they didn't understand each others' rules. === * Found the source of my Piq crashing bug: i was changing texture maps in the middle of a @renderPrimitive@ call. 
I guess that's taboo. * Getting close with the graphics details. But the memoizing trick isn't working. It just gets stuck, and I don't yet know why. Oh -- negative numbers!! Fixed. I'm still recomputing, and I think I know why: sources don't cache automatically. Given @f <$> a <*> b@, if @b@ changes, @f <$> a@ will get sampled, which means //re-applying// @f@. Oog. Type the text for 'New Tiddler' * On taxonomy ++++ * Reply to the Yampa list on ontology: +++> I don't know that there's a clear distinction to make between DSELs and other types. That's the point of the "E" in DSEL. One might well call Data.List and (especially) IO DSELs. While Arrow & Monad (and Applicative) were placed into Control, I don't agree that they fit there, as they're equally applicable to Data. The very general nature of type classes makes it inevitable that they will cut across our top-level categories of Control, Data, Graphics, etc. So I'd suggest moving them to a new top-level namespace "Class" (with forwarding modules). I've come to think of "Data" and "Control" as meaning "functional" and "imperative", respectively. Or perhaps as "tractable (denotational) semantics" and "intractable semantics". Or as having lots of useful properties and composability vs not. If Yampa fits those descriptions, then Data.Yampa. Or maybe give up on taxonomy, which is probably an unworkable proposition anyway [1], and go with the package name as the namespace name, plus Hackage tags for more description. [1] Ontology is Overrated: Categories, Links, and Tags http://www.shirky.com/writings/ontology_overrated.html === * A related note, unused: +++> I've been feeling increasingly uneasy about Haskell's hierarchical module system. It's become "the elephant in the room" for me, and I wonder for whom else. The elephant is a big one, namely: taxonomies don't usually work, and particularly in highly expressive languages such as Haskell. 
See [[Ontology is Overrated: Categories, Links, and Tags| http://www.shirky.com/writings/ontology_overrated.html]]. Until we find a working alternative to taxonomy, I suggest the following scheme: * A new top-level category called "Class" for type classes. Move Monad, Applicative & Arrow there, and probably many others. Create forwarding modules for backward-compatibility. * Interpret Data as "Functional" and Control as "Imperative". In deciding whether to place a data type into Data or Control, ask the question of whether the type's denotational semantics is as complicated as IO. (For instance, is IO visible in its external interface.) If so, place it in Control, and if not into Data. Maybe IO isn't the best cut-off point. I'd like to open up an exploration to get more clarity of what problems hierarchical modules solve whether there are alternatives that work as well for those problems without the problems that come with taxonomy. Tagging seems to be a great alternative to taxonomy in email (gmail), social bookmarking (del.icio.us etc), and blogging (Wordpress etc). But tagging works //with// ambiguity, yielding sets of results, while I think we're looking to === * Another, sent to the Yampa list, in reply to Henrik: ++++> > I could thus certainly live with a top-level Yampa! :-) But if not, I would, after having thought a bit more, prefer something like "edsl.yampa" to both "control.frp.yampa" and "edsl.frp.yampa". Of those four choices, "yampa" is the only non-arbitrary one, i.e., the only one that avoids crowning one of many aspects of Yampa as if it classifies the whole. it's simple and direct and adds no ambiguity on top of that imposed by the flat package namespace. It's also inconsistent with the common practices, but those practices are highly questionable. I'm going to experiment with using tags instead of categories in packages & modules. Cabal already has multiple tags (unfortunately called "categories") per package. 
We could start (multi-)tagging modules as well, via an informal pragma (stylized comment). The "ontology is overrated" article describes an evolutionary path from rigid taxonomy to flexible folksonomy (social tagging). Oh -- it occurs to me that the flexibility and friendliness of social tagging comes from decentralizing the tagging. Maybe better that the tags are external to content (Cabal specs and module sources). === * It also occurs to me that the use of name paths ("Foo.Bar.Baz") for uniquely identifying modules is poor man's hyperlinks. * Another reply to Henrik: ++++> > Instead, leave that to the author's of a module or family of modules: if they think it is useful to think of their work as an EDSL, and the EDSL view is a view they wish to push, well, then why not? Who's in a better position to judge? That's a traditional viewpoint. Shirky's ontology article offers a rebuttal that is for me very compelling. See his sections on [["Mind Reading" and "Fortune telling"| http://www.shirky.com/writings/ontology_overrated.html#mind_reading]]. === === * Events and data-driven computation: ++++ [[Yesterday| 2007-11-30]], I realized that my clever memoization trick is deftly foiled by @Source@'s lack of caching. Which gets me back to [[a recent idea for reactivity| 2007-10-10]] that caches for free. I think I'd want single-occurrence events, so here are so thoughts. ++++ An event occurrence is, semantically, a time/value pair: @semantics Occ a = (Time,a)@. But the time & value are often not knowable until (slightly after) that time. In other words, an occurrence describes information that may come from the future. I'll adopt the term [["future"| http://en.wikipedia.org/wiki/Futures_and_promises]] for now (though [[perhaps "promise" is a better fit| http://en.wikipedia.org/wiki/Futures_and_promises#Distinction_between_futures_and_promises]]). What can one do with a future? ++++ * //Force// it, which blocks until the future's time & value are known. 
++++ \begin{code} force :: Future a -> (Time,a) \end{code} === Note the purity of @force@ (lack of @IO@). Also note that @force@ is the semantic function for futures. * Ask whether the future's time is earlier than a specified time. May block if the specified time is in the future. ++++ \begin{code} earlierThan :: Future a -> Time -> Bool \end{code} === * @Functor@ instance: ++++ \begin{code} force (fmap f fut) = second f (force fut) \end{code} === Hm. Note the similarity to the @Functor@ instance for @(,) Time@. * @Applicative@ instance: ++++ \begin{code} force (pure x) = (minBound,x) force (fmap futf futX) = (tf max tx, f x) where (tf,f) = force futf (tx,x) = force futx \end{code} === Wow -- looks just like @Applicative@ for @(,) Time@, if ++++ \begin{code} type Time a = Max a instance (Ord a, Bounded a) => Monoid (Max a) where mempty = Max minBound Max a mappend Max b = Max (a max b) instance (Ord a, Bounded a) => Monoid (Min a) where mempty = Min maxBound Min a mappend Min b = Min (a min b) \end{code} === === * @Monoid@ instance. I'd have guessed that I want the //earlier// occurrence, for a sort of @select@-like semantics: ++++ \begin{code} force mempty = (Max maxBound, error "it'll never happen, buddy") forall f. f earlierThan mempty == False force (futa mappend futb) | ta <= tb = pa | otherwise = pb where pa@(ta,_) = force futa pb@(tb,_) = force futb \end{code} === Or do I want the //later// occurrence, as in the @Applicative@ instance? Hm. Since @mempty@ must be the identity for @mappend@, a max-based monoid's mempty would have to occur at @minBound@. That occurrence time would suggest that the value could be accessed at any real time, which is not the case. * @Ord@ instance, giving an answer of "both" to the previous question, without the troubling need for @mempty@ in the max case. Use @min@ for the earlier occurrence and @max@ for the later. We probably want @<=@ as primitive. 
Unfortunately, there isn't an @Eq@ instance of the expected kind (without adding @Eq a@ to the context, which I don't want for the @Ord@ instance), and @Ord@ is set up as a subclass of @Eq@. * The @Monoid@ instance //does not// match the monoid instance for @(Time,a)@, so it breaks the trend set by @Functor@ and @Applicative@. Since we have @min@ instead, maybe punt @Monoid@. === === * An enchanting chat with rici on #oasis, including bi-directionality of time, which I wrote about on the plane on the way back from ICFP just a few weeks ago. Saved. I ordered two books he recommended: "Maps and Dreams" and "Dancing with a Ghost". * Chatted with Ken Greenebaum about djVu and scanning. The [[Fujitsu S510| http://www.amazon.com/Fujitsu-Scansnap-Color-Image-Scanner/dp/B000RUOW66]] looks pretty sweet, and there's a$50 rebate until December 31^^st^^.  The main drawback I see is lack of network-based scanning.
* Started working on a "semantic prototype" for futures that leverages existing instances.
* Submitted two library proposals.  Both came up in my "semantic prototype" for futures. ++++
*  [[Max and Min for Monoid| http://hackage.haskell.org/trac/ghc/ticket/1952]]
===
* Response to Wolfgang J: ++++>
> > It really is about a particular approach to programming. Hence the name! Well, that's my opinion at least.

> It is about a particular approach to programming reactive systems.  And reactive systems are about control.

I agree with the first statement, but not the second.  I'd instead say that //imperative// reactive systems are about control, as with most other kinds of imperative systems, and //functional// systems, including reactive ones, are about values, i.e., data.

By the way, I distinguish between (a) functional programming and (b) imperative programming in a functional language.  For instance, using lists or FRP is the former, while using IO is the latter.  I'm not sure all Haskell users make this distinction.
===
* Note to Paul H: ++++>
By the way, I realize that I'm co-opting this Yampa list thread for my own agenda of raising awareness of this taxonomy question.  I'm doing something similar in the #haskell IRC channel.  I decided to start pointing out elephants wherever I spot them.  My other favorite right now is the inherent problems of IO, namely abandoning tractable (denotational) semantics and composability.  I get the impression that many people think that the monad trick somehow lets one think and program imperatively and get the benefits of functional programming.

P.S.  If you're interested, perhaps we could collaborate on this taxonomy thing as it relates to Haskell programming.
===
* Response to Henning Thielemann, asking what I want to do with my floating point infinities: ++++>
I'm using them for event occurrence times.  Or, from another angle, for times associated with when values can become known.  Pure values have time minBound, while eternally unknowable values (non-occurring events) have time maxBound.  Hm.  Now that I put it that way, I realize that I don't want to use existing minBound and maxBound if they're finite.  (My event types are temporally polymorphic.)  I'm mainly interested in Float/Double times, which have infinities in practice but apparently not guaranteed by the language standard.  I guess I'll either (a) bake in Double (temporally monomorphic) and rely on infinities not guaranteed by the standard, or (b) keep temporal polymorphism and add infinities to time parameter.
===
* More exchanges with Wolfgang J and with Paul H.
* Noodling over how to implement functional futures.  The [[old IVars| http://www.haskell.org/ghc/docs/3.02/users_guide/users_guide-4.html]] seem pretty close.  I did a simple implementation via STM.  Could a future be just an IVar?  To implement @fmap f iv@, make a new IVar @iv'@, spin a new thread that asks for the value @x@ of @iv@ and writes @f x@ to @iv'@.  Immediately return @iv'@.
* Ontology discussion:  +++
> Wolfgang suggests FRP as the top-level for all things FRP.

> [...] At least it is consistent with the story
> we've been telling for a long time with FRP being used as the umbrella
> term, and Yampa etc. as instances.

> Of course, as Conal pointed out, it's never going to be perfect,
> but hopefully good enough.

Good enough for what?  How about we get clear on our goals for this decision and *then* implement them.  Here are some possibilities I've heard so far in related discussions on #haskell, as well as in this thread:
(a) Unambiguous for import from a client program
(b) Descriptive
(c) Browsable
(d) Consistent with other libraries
Please suggest more if you think of any.

For myself, (a) is a very strong goal and probably the only requirement.  I'm also very interested in (b) and (c), and for reasons Shirky describes, taxonomy/hierarchy is ineffective at best, compared with the alternatives of links, tags, and search.  And for those considerations, I see (d) doing more harm than good.
===
> Am Dienstag, 4. Dezember 2007 10:30 schrieb Henrik Nilsson:
> [...]
> > but if it succeeds in grouping together all things FRP, including
> > Grapefruit, which Wolfgang didn't feel was a (E)DSL, and perhaps similar
> > cases, then yes.

> Good!  However, note that I wouldn’t want to place all of Grapefruit’s GUI and
> graphics stuff under FRP.Grapefruit.  FRP.Grapefruit would be for
> Grapefruit’s FRP core and for GUI and graphics things which are closely tied
> to the core (like graphic signals which are used to describe animations and
> are closely tied to the ordinary signals of the FRP core).  Modules which
> provide specific kinds of widgets like buttons, labels and treeviews, would
> go under Graphics.UI.Grapefruit.
Or you could just as well flip over the hierarchy and use Grapefruit.FRP and Grapefruit.Graphics.UI, since after all, FRP and UI are two aspects *of* Grapefruit.  In other words, Grapefruit is as much a sub-category of FRP as FRP is a sub-category of Grapefruit.  Each can be viewed as an aspect of the other.  Similarly for *all pairs of categories* in the two paragraphs above ((E)DSL, FRP, Graphics, UI, Grapefruit).  But "can be viewed as an aspect of" is not the same as "is exclusively contained in".  That difference is why tagging and linking works, and hierarchy doesn't.  Tagging and linking embody the multitude of relationships we find both in reality and in theory, particularly including relationships that emerge over time.  Hierarchy embodies only one relationship, which is containment, and that one relationship turns out to be fictitious in most circumstances.  Again, I highly recommend the Shirky article.  See particularly -- [[The Parable of the Ontologist, or, "There Is No Shelf"|http://www.shirky.com/writings/ontology_overrated.html#parable_of_the_ontologist]].
===
*  Another to Wolfgang: ++++>
On Dec 4, 2007 9:45 AM, Wolfgang Jeltsch <eu8lewwp@acme.softbase.org> wrote:

> Am Dienstag, 4. Dezember 2007 18:22 schrieb Conal Elliott:
> > Or you could just as well flip over the hierarchy and use Grapefruit.FRP
> > and Grapefruit.Graphics.UI, since after all, FRP and UI are two aspects
> > *of* Grapefruit.

> I don’t want to do this.  In the current module hierarchy, the top-level
> modules denote topics, not libraries.  Grapefruit isn’t a topic.

I agree with this description of the current modules hierarchy: it's about topics.  Unfortunately, topics are exactly what hierarchies do badly and links, tags, and searching do well.  (See "When Does Ontological Classification Work Well?" in Shirky's article.)  On the other hand, I don't see a problem with a quite flat hierarchy rooted in package names.  Moreover, package names must be unique anyway, so non-ambiguity is covered.  Within a package name, there could be just a flat collection of modules or additional structure, as the package authors decide.  What's currently topics would be moved to multiple tags in the Cabal file.  Additional tags could go into source files via a new "TAGS" comment/pragma, as desired.  Links as well, as already supported by Haddock.

>								    It’s just a
> library which started as a monolithic thing and is now becoming a collection
> of multiple packages which deal with different topics.  Some of them can be
> useful outside Grapefruit, like the FRP core.

I'm glad to hear it.  Go, reusability!  I've been doing the same sort of refactoring (of Eros), leading to DataDriven, DeepArrow, Phooey, TV, and GuiTV, and more in progress.

> > In other words, Grapefruit is as much a sub-category of FRP as FRP is a
> > sub-category of Grapefruit.  Each can be viewed as an aspect of the other.
> > Similarly for *all pairs of categories* in the two paragraphs above ((E)DSL,
> > FRP, Graphics, UI, Grapefruit).

> True in general.

> > But "can be viewed as an aspect of" is not the same as "is exclusively
> > contained in".

> The Grapefruit FRP core (which is rooted at FRP.Grapefruit or Grapefruit.FRP)
> is exclusively contained in the set of all FRP implementations and it is also
> exclusively contained in the set of all Grapefruit packages.

Hm.  Maybe a language disconnect.  By "*exclusively* contained in", I meant "contained in and only in".

> > That difference is why tagging and linking works, and hierarchy doesn't.

> Hierarchy works (to a certain degree) if there is an order on the aspects
> which says which aspects should be at a higher place in the hierarchy than
> others.  For example, an order which says that topics should go higher than
> library names.  Then you would just collect all aspects of a package, order
> them according to this order and would get a path for the module hierarchy.

I don't know where this order of "which aspects should be at a higher place in the hierarchy" can possibly come from, beyond the simple case you mentioned (topic vs library).  (See "Great Minds Don't Think Alike" and "Of Cards and Catalogs".)

> > Tagging and linking embody the multitude of relationships we find both in
> > reality and in theory, particularly including relationships that emerge
> > over time.  Hierarchy embodies only one relationship, which is containment,
> > and that one relationship turns out to be fictitious in most circumstances.
> > Again, I highly recommend the Shirky article.  See particularly -- The
> > Parable of the Ontologist, or, "There Is No Shelf" [1].
> >
> >[1]http://www.shirky.com/writings/ontology_overrated.html#parable_of_the_ontologist

> This sounds reasonable.  What about discussing this on the libraries list?

Yes, this discussion fits the libraries list.  Knowing that hierarchy is a deeply ingrained mental habit, I prefer testing the waters in a specific & reality-grounded setting like this FRP & Yampa list.

> My current problem is that I need a solution now.  This solution won’t be
> perfect but I want more or less the best solution we can achieve with the
> current system.  I don’t want to wait with the first Grapefruit release until
> we have tagging and linking. :-(

> So your ideas are definitely important in the long term why I’m searching for
> a short-term solution.

Me too!  I suggest PackageName.WhateverYouWant.MoreYetIfUseful plus tagging.
===
*  Another to Henrik: ++++>
On Dec 4, 2007 10:40 AM, Henrik Nilsson <nhn@cs.nott.ac.uk> wrote:

> Hi Conal,

>  > Good enough for what?  How about we get clear on our goals for this
>  > decision and *then* implement them.  Here are some possibilities I've
>  > heard so far in related discussions on #haskell, as well as in this
>  >
>  > (a) Unambiguous for import from a client program
>  > (b) Descriptive
>  > (c) Browsable
>  > (d) Consistent with other libraries
>  > Please suggest more if you think of any.

> Well, (a). I thought that was pretty obvious, as that's the only
> intrinsic role I was aware of for the present Haskell module names.

Me too.  I heard the others in a #haskell discussion.

> I disagree with you on (d). When working with others, I think it's
> important to strive for consistency to the extent possible. I'd
> probably go as far as saying that having two, or more, competing
> conventions in any one context is worse than one, even if that
> one is broken in certain aspects.

Today, we continue to perpetuate hierarchy because others did yesterday.  Tomorrow still others will do so because we did today.

> While I agree that hierarchical classifications are inadequate
> (and have thought so even before this discussion), I have yet to
> be convinced that the current hierarchical Haskell module name
> space is populated in such an awfully inconsistent way that there
> is not even possible to try to fit in with what is there.

> Again, maybe I am wrong. I'm willing to be convinced by concrete
> evidence.

I see examples in Data & Control, which is perhaps a clear distinction in imperative programming, but not nearly so clear in functional programming.  Why is list data and not control?  Why is Monoid in Data, while Monad, Applicative, and Arrow (and now Category) in Control?  Why are some instances of Control classes in Data?  Why is UI under Graphics, although there are non-graphical UIs?  Why isn't Text under Data?

I don't know whether you'll find these examples compelling or not.  Some people do, and some don't.

> As to

> (b) Descriptive
> (c) Browsable

> they are interesting, but I think that can be addressed through other
> means than the module name.

I agree and add that they're addressed *more* effectively through other means, as the Internet shows.
===
===
* Taxonomy: ++++
*  Response to Antony C: ++++>
Thanks, Antony.  I'm glad to have another voice in this conversation.

> 2.  I have always assumed that "Control" meant "Control Flow".  FRP is
> most certainly about a certain approach to control flow, namely one
> where control flow is determined implicitly from data flow
> dependencies and the flow of time.  (Telling someone, "think about
> data flow rather than control flow" is still a statement about control
> flow (among other things).)

I didn't realize anyone was thinking of FRP as an approach to control flow.  I think of it as an *alternative* to control flow.

For me, behaviors (reactive or otherwise) are very like lists and functions.  Where an imperative program would use control flow, a functional program would use lists, functions, or behaviors.

When I look at base/Control/*, the common thread I see is type classes whose most popular current applications happen to be imperative programming.  (Hopefully that will change.)  I think that's the sense of "Control" being used: imperative programming.

If you really want to use the hierarchy in a conventional taxonomic way, I'd say ask whether you're exposing an imperative or functional semantics.  If imperative use Control.Reactive.*, and if functional use Data.Reactive.*.

> The hierarchical module system is just a simple structuring tool to
> avoid name clashes.

As much as that's the case, the choice of PackageName.* (with deeper levels if you want) is simpler, more predictable, and more effective at avoiding name clashes than the currently common practice.  For instance, there were recently three packages with Data.Stream.  Two were merged, leaving two competing and incompatible modules.  My naming suggestion guarantees the absence of clashes, since package names are already required to be distinct.

>      We all seem to agree it is based on an
> intellectually flawed organizational model.  Since bucking the
> conventions won't fix the flawed model

Bucking convention contributes a little toward change, just as following convention adds still more inertia.  Neither choice has much effect by itself.

>					, just suck it up and follow the
> existing convention so users can spend more time thinking about their
> applications and FRP and less time thinking about the hierarchical
> namespace.

Won't another choice give users just as much time to think about FRP and their applications?

> 4.  The only wrong choice of where to put Yampa in the hierarchical
> namespace is not providing a conveniently packaged release of Yampa
> because we're too afraid of making the wrong decision about where it
> fits in the hierarchical namespace.  Please just pick something
> arbitrary but conventional for namespace and get Yampa itself packaged
> up and out there for others to experiment with with minimal hassle or

I agree with the recommendation to get Yampa & Grapefruit released in a way that avoids name clashes, which as you've mentioned is the purpose of the hierarchical module system.  I'm not sure where conventionality helps or unconventionality hinders making conveniently packaged releases.  I do see where the unconventional approach I've suggested helps: simplicity, predictability, and uniqueness (guaranteed absence of name clash).
===
===
* Made plane reservations for Charlotte & Audrey to visit Dec 30 -- Jan 6.
* Reply to Kristopher Blom on the Yampa list: ++++>
Hi Kristopher,
I'm delighted to hear that temporal continuity is important to you.  It's one thing that functional programming can do elegantly that imperative programming is inherently unfriendly to.  Continuity -- in time, space, or whatever -- is more nicely compositional than discreteness.

My original name for this paradigm was "reactive behaviors", and indeed Fran was called RBMH -- reactive behavior modeling in Haskell -- until shortly before submitting for publication (or maybe even just before camera-ready revision).  The "reactive" part was indeed alluding to the discrete aspect (events) and the "behaviors" part to the continuous aspect ("flows", or functions of time).  Until reading (really, re-reading) your note, I didn't realize that we'd lost the continuous aspect in renaming to Fran and then FRP.  And not just the continuous aspect, but the *autonomous* aspect as well.  The most beautiful thing to me about the Fran combinators was exactly the interplay between the continuous and discrete, with switcher to turn events into behaviors and snapshot to turn behaviors into events.

I liked the term "reactive" until your note jogged my memory.  Now I'd say it's just one aspect of FRP, and not the most distinguishing, and certainly not the hybrid nature.  Of course the paradigms of (a) taxonomy and (b) follow-the-pack will press us to pick one aspect to elevate over the others.  Whether and how much we succumb to those pressures is up to us.
===
* Long collaborative discussion with Duncan Coutts about applicative software management (recompilation etc).  Starting with the ideas I talked about in Freiburg: time-varying compilers & source code.
* Reply on the nvc-cert list: ++++>
> It dawns on me that the concepts are observations in regards to balance
> in relating. Do we need balanced relating? Could that be at the root of
> the feeling of urgency I got in relation to these words? That we, even
> in a hierarchically structured society, need balanced relating on some
> level and with some people. And that there is a tendency to regard those
> with which I have such a balanced relating as "people like myself". The
> very notion of friendship. The need for balance and reciprocity in some
> way and on some level so that it "evens out".
I'm guessing that balance, reciprocity, and especially evens-out here are still attaching to the same energy as fairness and equality.  "Mutuality" is another NVC word I typically hear this way.  A common thread for me is an entanglement of my choice to give to someone with their choice to give to me.  When I'm making all of my giving choices joyfully (what Marshall calls living *self-fully*), they're all for me not for the other person, and so the other person can't possibly get into debt to me.  Whenever I get grumpy and start thinking about balance or fairness, I know I've been playing Loan Shark again.

When my needs are met, I have no interest in abstractions like fairness & balance.  When my needs are unmet, those abstractions will only get me stuck in strategy attachment (entanglement).  Worse, others will recognize the weapon I'm wielding (coercive moralism) and will resist their natural impulse to connect with me openly.
===
* Misc tweaks to my Haskell libs.  I'm now using @g :. f@ and @f :$a@ in place of @g O f@ and @App f a@. * My new Reactive stuff is shaping up well. Where to go next with it? Do I rebuild all of DataDriven and then Phooey, GuiTV, and Eros? While I'm at it, what about switching to an Arrow interface, attempting to get rid of the IO stuff in DataDriven? Start with defining the combinators in DataDriven. * Added the DataDriven combinators to Reactive. They all came out beautifully, and I was able to remove //all// IO! * New [[Reactive wiki page| http://haskell.org/haskellwiki/Reactive]]. * Voluminous Discussion on haskell-cafe, which I renamed to "Do real programs need IO" +++ * Reply to Lennart A: ++++> > IO is important because you can't write any real program without using it. Ouch! I get awfully discouraged when I read statements like this one. The more people who believe it, the more true it becomes. If you want to do functional programming, instead of imperative programming in a functional language, you can. For instance, write real, interactive programs in FRP, phooey, or TV. And if you do, you'll get semantic simplicity, powerful & simpler reasoning, safety and composability. === * Reply to Daniel F: ++++> On Dec 9, 2007 10:07 AM, Daniel Fischer <daniel.is.fischer@web.de> wrote: > Interactive programmes without using IO? Cool :) And how! > I think you misunderstood Lennart. Thanks for checking. In this case, I think I understood Lennart fine and that he was saying what you're saying. > Would you deny that any useful programme has to do at least some of the following: > -accept programme arguments at invocation > -get input, be it from a keyboard, mouse, reading files, pipes... > -output a result or state info, to the monitor, a file, a pipe... If by "programme", you mean the code I write, then I'm happy to deny that my programme has to do these things. Examples below. If you include a stateful RTS, then no I don't deny it. 
> I think Lennart was referring to that, you HAVE to know a little IO to write programmes, at least getArgs, getLine, putStr(Ln), readFile, writeFile, appendFile. And therefore some use of the IO monad has to be taught relatively early. Explicit imperative programming is just one way to deal with input & output, not the only way. As proof, see FRP, Pan, or TV programs, which contain uses of none of these functions. (Nor could they, as these libraries are functional, having IO-free types and semantics.) Moreover, use of imperative programming sacrifices some of the semantic simplicity & composability that makes FP so appealing. That's why I'd like to see this belief in its necessity dispelled. That said, I don't think the existing functional (non-IO) approaches to interaction are quite there yet with the flexibility of imperative programming. It will take more work to get them there, and that work is most likely to be pursued by people who doubt the necessity of IO for writing "real programs". In that sense, Lennart's and your statements are self-fulfilling prophecies, as are mine. BTW, if you haven't seen it already, please check out [[TV|http://haskell.org/haskellwiki/TV]]. The TV (tangible values) approach includes a simple algebra of interfaces (input/output) and keeps it separable from the core computation. The separability allows the interface parts to be composed in parallel with the core part. For instance, when two function-valued TVs are composed, the interfaces are peeled off, so that the core functions can be composed directly. The output half of one interface and the matching input half of the other are discarded. The remaining input and output halves are recombined into a new interface, which is used as the interface of the composed TV. The core interface algebra can be used for text stream i/o, GUIs, and many other possible styles of information passing. 
I mention TV, because it's an example of combining the purity & composability I love about FP with the usability of a "real" app. For more about this combination, please see my Google tech talk [["Tangible Functional Programming: a modern marriage of usability and composability"| http://conal-elliott.blogspot.com/2007/11/tangible-functional-programming-modern.html]]. That talk focuses on end-user composability, but the essential points apply as well to explicit programming. As I mentioned before, TV (a) is currently less flexible than imperative/IO programming, and (b) has the composability, guaranteed safety, and amenability to reasoning of pure functional programming. === * Unused bit: ++++> I can get access to all of this info //without// using IO (examples in my previous note and below), just as I can write any pure function without mixing in imperative (IO) code to read arguments and write results. Argument reading and writing (like thunk evaluation) happen at the implementation level (in the RTS) without complicating the semantics. === * Reply to Daniel F: ++++> Thanks for the clarification. >AFAIK, the only way to get input and produce output is via the IO monad Now you know something different, don't you? FRP, Pan, TV. Also Grapefruit, functional forms, and others I'm not thinking of or don't know about. As for your example, mixing the IO with the functional, as you have, interferes with composing the result. I can think of two alternatives. One is to move reading & printing from the definition to the uses, as with Unix stream filters. Still convenient, and much more flexible. Of course, Unix programs were written in C and so use explicit I/O instead of lazy functional streams. (Though Doug McIlroy, who invented Unix pipes, knew that pipes were equivalent to coroutines and to lazy evaluation. See my "modern marriage" talk (the video I mentioned) for more about Unix and TV.) 
A second alternative is to use TV to explicitly capture the interface (I/O), which could look like this: \begin{code} coolTV :: TV (String -> String) coolTV = tv (olambda (fileIn "Data.txt") defaultOut) performAwfullyCoolFunctionalStuff \end{code} where \begin{code} tv :: Output src snk a -> a -> TV src snk a \end{code} The type parameters src & snk are for various interface styles. Then coolTV can be used on //either// side of a TV-style pipe, resulting in the removal of the reading or writing half. And yes, there are //some// uses of IO for which I'd be hard pressed at this point to offer you an alternative. Which is a far cry from IO being necessary for all "real" programs, even today. Given this state of affairs, I'd prefer the Haskell community to point newbies away from IO and toward purely functional programming for things like UIs and graphics and help them change their way of thinking. Let's also admit that we haven't yet figured out how to apply our functional paradigm as flexibly or broadly as we'd like, and so meanwhile we have this monadic IO trick that lets them write nicely factored imperative code that can call into the functional pieces. === * Another to Daniel F: ++++> I'm glad you got to check out TV and like what you found. Sorry for the missing haddock docs. I'm waiting for hackage to catch up with haddock 2.0. TV can use the IO & OI types for output & input, and it was convenient for me to do so. It would make a stronger & clearer point if I'd buried IO more into the implementation (RTS) of an O type and an I type. Maybe there are also cleaner & safer implementations of O & I. If you look at GuiTV, you'll have to dig more deeply through layers of implementation before you find IO. If you want to dig yet further past GuiTV's RTS into GHC's RTS, you'll find yet more mutable state. The original question, as I understood it, was whether real programs need IO. People can write real GuiTV programs and not see a bit of IO. 
Yes, I did mean the library as part of app-writers' RTS, in the sense that the app writer sees and writes no IO. That part of the RTS happens to be implemented in Haskell. And I'm very glad Haskell still has IO, because I don't yet know how to implement some of those layers functionally. BTW, I think TV core and GuiTV themselves are purely functional. The OI/IO specialization is separable, but I didn't bother. The generality and purely functional nature of TV would probably be made clearer by factoring out another library for text I/O. The layers beneath GuiTV (including wxhaskell) also illustrate another point, which is that there are things I don't yet know how to implement in Haskell without IO. So I'm glad it's there (for a subset of "real programs"). If you specialize TV to the OI/IO instance, then your description of it is another partial truth: TV is a library that supports & enforces separation of algorithm and IO. And in doing so, it restores composability. Ditto for algorithm & GUI, and probably many other kinds of interfaces to the algorithmic heart of a program. === * And to Lennart: ++++> > You can't write any program in Haskell without using IO, because the type of main involves IO. It so happens that Haskell currently insists on main :: IO (). That's simple to fix, however, and with great pay-off. Suppose instead main :: TV a (where I'm omitting the other TV type args for simplicity.) Then a program could not only be run, but also composed with other programs. They could even be composed by the end-user *while running*, as in Eros. Similarly, ghci implicitly inserts "print" when given a non-IO type. We could make that mechanism a little more general, and allow implicit insertions of other kinds of renderers, for purely functional images, music, 2D & 3D geometry & animation, interactive GUIs, etc. === * The heart of the matter for me is not whether IO happens somewhere, but how it affects composition. 
For instance, IO is not part of how I think about and compose numbers. Before I do my computation, however, I'd like to get a number from somewhere, and after I get my result, I'd like to show it to someone. So IO is handy on the outside of numeric composition. Similarly for other functional data types. We could design IO into the semantics of our pure data types, but doing so would interfere with the simple semantics. === * Reactive: ++++ * My implementation is massively multi-threaded (via STM). Will I run into trouble with wxHaskell and/or OpenGL? I'm not sure I need real multi-threading. * I probably also want some guarantees on order of response for events, and STM will probably not be able to make those guarantees. * Started to roll my own monad for interleaved computation, with simple IVars for scheduling. === * Reactive: ++++ * On reflection, perhaps there's a strong correspondence between programming with (a) threads & blocking or (b) events & call-backs. When a thread blocks on an input that's not yet ready, the scheduler places the thread's continuation into a queue associated with that input. When an input becomes available, the scheduler resumes all threads waiting on that input. Correspondingly, an event can have call-backs registered with it, to be executed when the event occurs. One difference is that typically event registration happens once for several call-back invocations, while thread continuations must get repeatedly suspended in order to be repeatedly resumed. The difference is a trade-off between convenience and flexibility. A contribution of FRP is to wrap this sort of machinery in a purely functional interface. * STM has @orElse@. For repeated invocation, the event approach can just register the same callback twice. Single invocation is probably doable, but a bit trickier: have the call-back remove itself from both client queues. I wonder if STM does something like that. 
* Here's a simple model and implementation for multi-threading with cooperative scheduling. A computation takes (as argument) a queue of runnable threads and yields an IO (more generally, a monadic value). @fork@ queues a new thread (or queues the current thread and switches to the new one). IVars are represented by a ref that contains either a value or a queue of continuations to perform once the value becomes known. Reading an empty IVar causes the current continuation to get added to the IVar's queue and the next queued thread to get removed from the thread queue and run. If the thread queue is empty, then raise a deadlock exception. Writing an empty IVar causes all of the IVar's continuations to get applied to the new value and the resulting threads enqueued. === * Idea on functional programming & perception: ++++ Consider types like numbers, booleans, lists, pairs, etc. If someone types an expression of the former types into hugs/ghci and sees a result, with no visible IO. They mean "print" (an action) and want that implicit action executed, but they like not having to say it. Now consider types like images, animations, GUIs, etc. Print is based on Show, and strings don't work well for this second variety of types. The trick of the implicit print command doesn't work, and one instead has to use an explicit command, and IO is made plain. Perhaps people conclude (consciously or unconsciously) that the difference is in the types themselves, rather than in the surface characteristics of their interpreter's REPL. So here's a simple idea: introduce another class with a more flexible and modern method for presenting values. For instance, \begin{code} class Present a where present :: a -> IO () \end{code} The important piece in influencing perception is to teach Haskell REPLs about the Present class. When someone enters an expression e of type t, if t has a Present instance, then do "present e". Otherwise, do "show e". 
Either handle IO expression as a third case, or give IO a Present instance (present == id). I've ignored a possible return value b :: t' for present, but we could add it in by showing the result or (more fun) starting the analysis all over again. (Consider whether t' has a Present instance, ....) While the Present class is clearly expressive enough, one might object that present can do arbitrary IO, while print can only do putStrLn. It might be worth formulating safer but still fairly flexible ways of presenting values than the IO type. We might also want to use a nicely compositional approach to presenting values, such as the TV algebra (oprim, opair, olambda, iprim, ipair). When IO semantics are genuinely present as part of understanding a type, I'd like that information to be prominent. Where the present idea appeals to me is for types that have much simpler and safer semantics than full-blown IO (which includes way more than I/O), such as images, animations, 3D models, GUIs, and file processors. === * Title: "Toward composable Haskell programs". Haskell functions, particularly pure ones, are highly composable, but Haskell //programs// are not. The reason is simply that a program is defined by its @main :: IO ()@, and values of that type can only communicate through side-effects. One weak form of composition remaining is Unix-style piping of byte streams. [Note limitations & inconsistencies, as in my "modern marriage" talk: monomorphism, parsing & unparsing, linear chains.] * Nice [[blog post| http://www.etoy.com/blog/archive/2007/11/26/tangible-functional-programming.html]] about my "modern marriage" Google tech talk. * Playing with an idea for relaxing the type of @main@, following TV: ++++ \begin{code} class Presentable a where present :: a -> IO () class Acceptable a where accept :: IO a instance Presentable (TV a) where present = runTV instance Presentable String where present = putStr -- putStrLn ? 
instance Acceptable String where accept = getContents instance Presentable (IO ()) where present = id instance (Acceptable a, Presentable b) => Presentable (a -> b) where present f = accept >>= present . f \end{code} === Hm. @Presentable (a -> b -> c)@ wants to @getContents@ twice. * Is there an @Acceptable@ instance for @(a -> b)@, dual to the @Presentable@? I don't see how to define it. If there's no dual with the types of 'present' & 'accept', then perhaps there are other types to use. * Here's a simple way to implement reactive values. Gather up all of the primitive source events into one (including keyboard & mouse). Take the first occurrence and feed it into the reactive value, to get a list of consequent IO occurrences and a residual reactive value. Repeat. This idea is very like some of the old reactive behavior representations, so review them. Next, consider an arrow version, so as to hide the signals. * Some thoughts on Reactive: ++++ * Have the wxHaskell callback overwrite itself. * Each input control type has its own type of event/flow content. Maybe make some classes parallel to wxHaskell's classes. * Have mouse position and slider values be //behaviors//, not events. Pull, don't push. Use on/off events to trigger listening vs ignoring. Distinguish logically discrete events from simulation of logically continuous behaviors. One clue in distinguishing is whether the window system automatically discards some of the "event" occurrences. If so, it's likely to be logically a behavior implemented as an event. * Use snapshot to throttle down rapidly changing output values. More accurately, to switch from continuous (behaviors) to discrete (events). * Use arrow //recursion// (@loop@) for stateful reactive values. * Have one or more idle/tick events. Use for event detection, integration, and output snapshot. * Replace down/up and enter/leave with isDown and isIn boolean behaviors reactive values. Similarly for sliders (isDragging). What about buttons? 
Maybe a transition from False to True, followed immediately (at the same time) with a reverse transition. * For mouse motion, try the type @Reactive (Maybe (Behavior Point))@. Drop isIn, or if useful, define isIn via the maybe-valued mouse motion. * Something puzzles/troubles me: I want to use an arrow style programming interface, but Arrow seems very inflexible. For instance, types like @a ~> b ~> c@ aren't typically useful for an arrow @(~>)@. Am I confusing levels (to use Robert Johnson's phrase)? How do I reconcile TV's approach, parameterized by arbitrary types, including functions, with the Arrow style, which always has one input type and one output type? Maybe to really do the arrow style for GUIs, I would do a lot of redesign to how widgets are specified and composed, as in Fruit. Then there would be just a single, simple UserInput type, which includes mouse & keyboard behaviors & events. Without an arrow approach, I don't know how to get an simple & efficient implementation of function over reactive values (RVs). Noodle over that one. * The arrow I've been thinking of is very like the Yampa's SF (and like my old "residual behaviors" representation): a function from RVs to RVs (signals to signals) is represented as a function that takes a time & value and yields a list of output values for that time and a new function from RVs to RVs. (Unlike Yampa, (a) there's a list of output values rather than one, and (b) the implementation is data-driven.) === * Bit of prose for conversation on #haskell: One thing i've been thinking about lately is the idea of "composable programs". The type of Haskell's "main" prevents them from being nicely composable. TV suggests an alternative, by keeping the interface part and the pure core combined but separable. Composition automatically modifies the interface part, which is not possible in typical IO-style Haskell programming. 
I'd like to revive the original beautiful Unix vision of composable apps, but in a modern, workable way (unlike Unix's strategy). That's what the "modern marriage" Google tech talk is about. * I want to test out my STM/Ivar-based Reactive implementation. What's a simple test harness? Start with a wxHaskell program. Did it. Died with "atomically was nested", as I feared it would. * Try to implement my IVar type (with Monoid, Functor, Applicative, and Monad) on top of Concurrent Haskell & MVars instead of STM and TVars. ++++ * @takeMVar@ blocks, waiting for an MVar to become full. When the blocking thread awakes, the MVar becomes empty. There's also @readMVar@, which leaves the MVar full (combining @takeMVar@ & @putMVar@). I'll want that variant. * @putMVar@ blocks, waiting for the MVar to become empty. I'd like an error instead, so use @tryPutMVar@, a non-blocking variant that returns a boolean. * What to use in place of @orElse@ for IVar @mappend@? Given two MVars, make a third that fills when either of the given two fills. Fork two threads, each of which does a @tryPutMVar@. I could have the winner also kill the loser, but I don't know that it matters. === * Got a bunch of help on #haskell with my ubuntu wireless card problem, especially from shachaf. He suggested [[ndiswrapper|http://ndiswrapper.sourceforge.net]], which looks like more than i want to get into today. I suspect that the problem is specific to my wap. Get another and see what happens. Or take my computer to Starbuck's or in town, just to see if I can pick up a signal. * Consider this definition: @type Parser = [Token] -> (ParseTree, [Token])@. It's sort of similar to a signal function representation. * Found in the [[Concurrent Haskell docs| http://www.haskell.org/ghc/docs/latest/html/libraries/base/Control-Concurrent.html#v%3AthrowTo]]> ++++> @throwTo@ does not return until the exception has been raised in the target thread. 
The calling thread can thus be certain that the target thread has received the exception. This is a useful property to know when dealing with race conditions: eg. if there are two threads that can kill each other, it is guaranteed that only one of the threads will get to kill the other. === * Uploaded TypeCompose 0.3 to hackage. * Simplified IVar.hs and renamed Future.hs * Converting Phooey from DataDriven to Reactive. To do: @before@, @timedPoll@, event @Applicative@ and @Monad@. * Reactive: ++++ * Implemented @Monad@ & @Applicative@ instances for @Event@. === * We had lots of erosion on the driveway after the rain. Mark & Susie Duncan came out to look. * Conversation (saved) with vincenz (Cristophe Poucet) on #oasis about future values. Take-away: ++++ * He had a pretty entrenched assumption that the "times" I talked about were execution times, as in when values are received or computed. His assumption was reinforced by my prototype implementation. * The term "futures" has a strong connotation for him of an implementation with nondeterministic semantics. I might instead use "future values" and relate/contrast with "futures". In fact, my current implementation //is// a nice implementation of nondeterministic futures. Instead of fixing mine, perhaps I could build a deterministic notion on top of it. === * My video card stopped working. I called Ivan at PortableOne (800-650-4006). He said the GPU is replaceable, and recommended that I send it in rather than try to replace it myself. Something about their thermal protection process. I'll have to wait for folks to return to work on the 26th. Meanwhile, VGA mode works, and I can display at full resolution on my 24-inch monitor. * Read Baker & Hewitt's //The Incremental Garbage Collection of Processes// (1977). The topic is "eager beaver" evaluation (spawning a process per function argument), and the key question is how to prevent the useless threads from consuming resources. 
For instance, start up various evaluation methods for a problem and use the first result. The other methods are then useless, and some might consume resources forever. One technique is to kill the losers. Baker & Hewitt suggest instead using garbage collection. I wonder how I might use Baker & Hewitt's idea. I currently use the kill-process approach. * To automatically {{{strip}}} executables (reducing exe size), use {{{ghc ... -optl -s}}}. * Linux: ++++ * Remapped caps-lock to control, using [[these directions| http://www.columbia.edu/~djv/docs/keyremap.html]]. * Installed darcs (with apt-get). * Installed ghc-6.6 with apt-get. Needed for compiling ghc-6.8.2 from sources. * I have to be very careful with my bandwidth use. * darcs-got latest cabal, for use with cabal-install.. * downloaded ghc-6.8.2 src and extra-libs * Built & installed ghc-6.8.2! (Symlinks in /usr/local/bin, shadowing the symlinks ghc-6.6 in /usr/bin.) * Installing cabal-install, which requires Cabal-1.3.2, zlib, and HTTP. Install of zlib failed, due to missing zlib.h. apt-got zlib1g-dev, thanks to tips from shachaf. ++++ {{{ <conal> shachaf: that one was found. how did you come up with zlib1g-dev? <shachaf> conal: sudo apt-get install zlib<tab><tab>, and guess. :-) [22:28] <shachaf> conal: Then apt-cache show to verify. <shachaf> conal: In general, .h files are in -dev packages. <Cale> More general solution: apt-file search zlib.h <conal> shachaf: thanks for the tips! <shachaf> Cale: Well, this is usually faster. :-) <Cale> It appears to be in zlib1g-dev <shachaf> Cale: Since I already have the apt-get install typed in. }}} === === * Correspondence on thread GC: +++> Thanks, Simon. If I understand the mechanism you're describing, it discards readers of an empty MVar when there are no other references to the MVar *because* the MVar can never get written. And if there are other readers but no writers, then I'm guessing GC wouldn't know that, and none of the readers get discarded. Is that so? 
I think Baker & Hewitt's trick was analogous to discarding writers of an already full MVar when there are readers (readMVar) but no takers (takeMVar). (Though not quite, since readMVar is implemented via takeMVar & putMVar.) I guess that effectively means IVars instead of MVars. In either direction (blocked reader or blocked writer), the interface of MVars (or IVars) would seem to prevent an accurate analysis, since GC wouldn't know whether a Var reference was for reading or writing. Right? A simple solution might be to hide the Var itself and instead expose reader and writer halves. If there's an analysis problem at all, does that solution make sense? === * Hm. My graphics card started working again. I bought it on Feb 1, so assuming a 1-year warranty, I have about five weeks left. * Piq: ++++ * Working great with //reactive// used in place of //DataDriven//, but only when interpreted. * When I run compiled with {{{-O2}}}, my textures get made & reclaimed over & over. Also, the initial image set in imageDisplay gets used. I'd had it undefined, and giving it a definition required adding a @defaultColor@ method to @ToPixel@. * If I compile with (-O), no texture churn, but it runs very slow. * Maybe the problem stems from my use of @unsafePerformIO@ in Reactive's @Data.Future@. Adding some @NOINLINE@ pragmas. === * These problems start to show up even when I use just the //reactive// examples. Interpreted works, and compiled doesn't. * Tracking down the problem with compiled //Reactive//. Perhaps it's about laziness vs unsafePerformIO. * Finally found a solution: compile with {{{-threaded}}}. See docs on [[Haskell threads and Operating System threads| http://www.haskell.org/ghc/docs/latest/html/libraries/base/Control-Concurrent.html#10]]. Speculation: ghci itself is compiled with {{{-threaded}}}. Without {{{-threaded}}}, I get improved reactivity but not perfect if I explicitly @yield@ immediately after the @putMVar@. 
In that case, output is always one step behind input. I'd rather solve the problem with @yield@ than rely on {{{-threaded}}}.2 * Similarlity: ++++ \begin{code} Data.Maybe.catMaybes :: [Maybe a] -> [a] Data.Event.justE :: Event (Maybe a) -> Event a \end{code} === Is there a common generalization? Yes, thanks to oerjan on IRC: ++++ \begin{code} joinMaybes :: MonadPlus m => m (Maybe a) -> m a joinMaybes = (>>= maybe mzero return) \end{code} === Filtering also: ++++ \begin{code} filterMP :: MonadPlus m => (a -> Bool) -> m a -> m a filterMP p e = joinMaybes (liftM f e) where f a | p a = Just a | otherwise = Nothing \end{code} === Alternatively (thanks to oerjan again): ++++ \begin{code} filterMP p m = m >>= guarded p where guarded p x = guard (p x) >> return x \end{code} === or even @filterMP = (=<<) . guarded@ (thanks to lambdabot). * Tracking down the problem with compiled //Reactive//. Perhaps it's about laziness vs unsafePerformIO. * Finally found a solution: compile with {{{-threaded}}}. See docs on [[Haskell threads and Operating System threads| http://www.haskell.org/ghc/docs/latest/html/libraries/base/Control-Concurrent.html#10]]. Speculation: ghci itself is compiled with {{{-threaded}}}. Without {{{-threaded}}}, I get improved reactivity but not perfect if I explicitly @yield@ immediately after the @putMVar@. In that case, output is always one step behind input. I'd rather solve the problem with @yield@ than rely on {{{-threaded}}}.2 * Similarlity: ++++ \begin{code} Data.Maybe.catMaybes :: [Maybe a] -> [a] Data.Event.justE :: Event (Maybe a) -> Event a \end{code} === Is there a common generalization? 
Yes, thanks to oerjan on IRC: ++++ \begin{code} joinMaybes :: MonadPlus m => m (Maybe a) -> m a joinMaybes = (>>= maybe mzero return) \end{code} === Filtering also: ++++ \begin{code} filterMP :: MonadPlus m => (a -> Bool) -> m a -> m a filterMP p e = joinMaybes (liftM f e) where f a | p a = Just a | otherwise = Nothing \end{code} === Alternatively (thanks to oerjan again): ++++ \begin{code} filterMP p m = m >>= guarded p where guarded p x = guard (p x) >> return x \end{code} === or even @filterMP = (=<<) . guarded@ (thanks to lambdabot). :: 2007-12-27 [2007-12 day] * Poking around at my threading issue. //Two// yields after each @putMVar@ suffice for a simple example, but not a slightly more complex example. I guess I'll stick with {{{-threaded}}}. Unfortunately, I think each client executable must be compiled (linkled) with {{{-threaded}}}. Oh well. * I released (on Hackage) phooey-1.4 (on DataDriven), reactive-0.0, and phooey-2.0 (on reactive). Emailed yampa-users about reactive, asking for comments. * {{{ghc-pkg}}} was getting bogged down. Fixed with {{{make unreg}}} in {{{~/Haskell}}}. * Asked about double-buffering. Svrog on IRC offered to help me with opengl stuff. * Reply to "NVC and Christianity". * Wireless stopped working on my new computer. It turned out that I accidentally turned off the antenna while fiddling with boot settings when my GPU stopped working. Now I wonder if something similar happened on my linux computer. * Stumbled across [[Linux on the Acer TravelMate 800LCi| http://bernd-wurst.de/linux/tm800.php]]. Points to a [[Intel® PRO/Wireless 2100 Driver for Linux| http://ipw2100.sourceforge.net]]. * I found the [[Acer TravelMate 800 user's manual| http://www.acersupport.com/library/tm800ug.pdf]]. The wifi on/off button is right of the two silver buttons on the front. For a bit, both routers showed up with strong signals, though I still didn't get on the internet. Rebooted. Waited a bit, and it works! 
* Something is going badly with //Piq// on //Reactive//. Responsiveness slows down quite a lot with interaction. I wonder if it has to do with starting so many threads. In the Piq examples, I'm dragging the mouse around. Each "motion" event occurrence leads to a number of new threads. I'd expect sliders to be similar, and yet the slider examples in Reactive and Phooey have no trouble keeping up. Also, when compiled, I get a lot of unexpected computation. When I cover & uncover the display window, the contents all get recalculated. When I pan around, everything gets recomputed on every move. Wow -- now I see that the objects (index wrappers) are getting reclaimed immediately after displaying. Whether this happens depends on which modules are compiled with -O (or -O2) and which compiled without optimization, or are just interpreted. I think the crucial module is Display, which contains the memoization. Sure enough -- when I mark that module with {{{-Onot}}}, I stop losing my textures too soon. However, in that case, my textures never get finalized. Hm. I retried and got different results. * Got a note from Mads Lindstroem about Reactive. He pointed out that wxHaskell is not thread-safe, and that I invoke it from many threads. Did a bit of searching: ++++ * [[FFI and callbacks| http://www.haskell.org/pipermail/haskell-cafe/2005-August/011042.html]]. Tidbits: ++++ * A GUI toolkit's main event loop blocks, so either (a) use {{{-threaded}}}, or (b) set up a timer with a call-back that does a frequent @yield@. * "It may appear [safe to call wxHaskell GUI functions from several threads at once] at first and then mysteriously fail. I don't think wx is thread safe (since the underlying gui libs are not thread safe either). I don't believe that wx does it's own per-widget locking, but I may be wrong." 
* From the [[wxWidgets manual| http://www.wxwidgets.org/manuals/2.4.2/wx494.htm]] +++> If you do decide to use threads in your application, it is strongly recommended that no more than one thread calls GUI functions. The thread sample shows that it is possible for many different threads to call GUI functions at once (all the threads created in the sample access GUI), but it is a very poor design choice for anything except an example. The design which uses one GUI thread and several worker threads which communicate with the main one using events is much more robust and will undoubtedly save you countless problems (example: under Win32 a thread can only access GDI objects such as pens, brushes, &c created by itself and not by the other threads). === === === * Ubuntu: ++++ * {{{dpkg -L <pkg-name>}}} to list the contents of a package. * {{{/usr/share/emacs/22.1/}}} * [[Jabber IM in Emacs| http://www.emacswiki.org/cgi-bin/wiki/JabberEl]] === * Interested in Vertigo revival: Ivan Tomac (svrog, tomac,pacific.net.au), jsnx (Jason Dusek, jsn,rtc0.net). * Reactive: ++++ * I tried the @yield@-in-a-timer approach. It works fine, with an interesting artifact: if I drag a slider around, the output takes several iterations to catch up. Adding in {{{-threaded}}} eliminates the artifact. I think it's because the various threads get scheduled //fairly//, which means the main thread (with the event loop) is scheduled as often as the reactive threads. Without {{{-threaded}}}, the event loop blocks. I can get fairly zippy performance by setting the timer interval for 10ms (100/sec). However, there's an interesting remaining problem: //all// of the motion events get processed. The slider threads finish their work very quickly, and the consequences of those inputs pile up and all get executed eventually. * Is there literature somewhere on the practice of discarding occurrences of motion "events"? * Idea: have each output save its latest value in a ref. 
Thanks to laziness, overwritten values won't actually get computed. Each output also has a timer, which grabs the most recent value and sends it out. Make the ref be Maybe-valued, and reset to Nothing after output. Better yet, turn off the timer after output and turn back on when a value arrives. That way, outputs won't eat cycles when they quiesce. Tried out this idea. Work well with a Maybe-valued ref and a timer or idle that keeps going, but not with a timer that turns on & off. I don't know why. * Pushed new versions of Reactive and Phooey. Piq still acts weirdly. Maybe because of multi-threading and OpenGL. * I think I'm okay with the texture allocation & drawing done under @forkR@. They won't really get evaluated until they're pulled on by display, which happens in an idler (an @on idle@ handler). * On the other hand, the texture finalizers are now happening outside of the main thread, which is probably problematic. * When I drag around an image, it starts out fast and slows down dramatically, even when there's no more allocation going on. === * Consider [[UIST| http://www.acm.org/uist]] 2008, October 19-22, 2008 in Monterey, CA. The submission date is not yet posted, but last year's was March 30. * Looking into multi-monitor support for Ubuntu on my Acer 800lci. The card is an ATI mobility Radeon 9000. * Trying to get multi-monitor to work in Linux. Got jabber account on chat.seereason.org. Getting help there. Installed {{{grandr}}}, which crashes. Found ATI linux drivers. Wait until I have more bandwidth. Type the text for 'New Tiddler' Type the text for 'New Tiddler' * 2008! * Various chats on #haskell related to TV/Eros and the idea of composable apps. Spin-off conversation with [[yaxu| http://yaxu.org]] (saved). He pointed me to this [[livecoding demo| http://homepage.mac.com/digego/study_in_keith.mov]]. Wow. * Wondering about the weird Piq behavior of progressively slowing when dragging. It effects both zoom & pan. 
In contrast, changing the image itself doesn't have that problem. For testing, I defined a widget that just computes a cumulative pan/zoom, with no opengl stuff. It has the same slow-down, including a fast-growing space leak. The memory stays allocated even after I quit the app. Now I believe I'm closer to finding the source of the problem. Whew! * Leak occurs with @motionDiff'@. * Stimulating exchanges on NVC-cert about Christianity and about "rights". * Thought: Here's an interpretation of whatever Jesus said what got translated as "Judge not, that you be not judged. For with the judgment that you pronounce you will be judged .....". Perhaps he meant that when we think we're judging others, we're really judging our shadow selves. * To allow IRC messages from unregistered users: {{{/msg nickserv set unfiltered on}}}. * Finally i found a mouse example that does not space leak. The crucial difference seems to be absence of @joinMaybes@ (previously called @justE@). ++++ \begin{code} -- | Pass through @Just@ occurrences. joinMaybes :: MonadPlus m => m (Maybe a) -> m a joinMaybes = (>>= maybe mzero return) \end{code} === Space-leak also with this specialization: ++++ \begin{code} justE :: Event (Maybe a) -> Event a justE = inEvent (h =<<) -- note =<< instead of <$>
where
h (Just a `Stepper` e') = return $ a `stepper` justE e' h (Nothing `Stepper` e') = eFuture (justE e')
In the case of @joinMaybes@, there are a lot of @mempty@s. They all pile up. Fix: make a special representation for never-occuring futures so they can be recognized. ++++ \begin{code} data Future a = Future (IO a) | Never \end{code} === Did it. Works out //very// nicely. Allows many optimizations, such as: ++++ \begin{code} Future getf <*> Future getx = future (getf <*> getx) _ <*> _ = Never \end{code} === * Journal of [[Visual Mathematics| http://www.mi.sanu.ac.yu/vismath/pap.htm]] * Replied to James McCartney (father of [[SuperCollider| http://supercollider.sourceforge.net]]), who wrote me asking about fusion and its applicability to algorithmic music. * Replied to David Duke about functional graphics and TV. I'm delighted with his question of the denotational domain for 3D scenes with internal interaction. That's exactly the central question in my design process. And one that I almost never hear asked. * Reactivity: ++++ * I want to do a third representation of reactive behaviors, to go with the listerner algebra version and the multi-threaded, future-based version. This third one is similar to my old "residual behaviors" and the Yampa representation. In those two representations, sampling returned a value and a new behavior (or signal function). I'm thinking of something similar, but instead of sampling, it's about event occurrences. The heart is a way to represent a subset of @Event a -> Event b@: ++++ \begin{code} -- event occurrence type Occ a = (Time,a) -- represents Event a -> Event b newtype EF a b = EF (Occ a -> ([b], EF a b) \end{code} === * The catch is that I want to represent not just simple functions types like @Event a -> Event b@, but also @Event a -> Event b -> Event c@. Also, pairs and non-functions. My idea is to transform all cases into the form @Event a -> Event b@, possibly tupled with other such types. 
For instance, ++++ \begin{code} Event a -> Event b -> Event c =~= (Event a, Event b) -> Event c =~= Event (Either a b) -> Event c \end{code} === A non-function, @Event a@ can be converted to @Event () -> Event a@, and fed with the never-occurring event (@mempty@). Given a pair on the rhs of @->@, split into two functions. * What about reactive values? Take apart the reactive value into initial value and event. * Look at Fudgets again. I think [[Fudgets| http://www.md.chalmers.se/Cs/Research/Functional/Fudgets/publications.html]] used sums on communication channels. Yes, for parallel composition: ++++ \begin{code} (>+<) :: Fudget a b -> Fudget a' b' -> Fudget (Either a b) (Either a' b') \end{code} === * Generalized for arrows: ++++ \begin{code} class Arrow a => ArrowChoice a where ... (+++) :: a b c -> a b' c' -> a (Either b b') (Either c c') \end{code} === See also [[ProdArrows -- Arrows for Fudgets| http://www.cse.ogi.edu/~magnus/ProdArrows]] (talk slides). === * TV has visualizers for base (really any) types, pairs, and functions. In particular, functions have //interactive// visualizations. I can think of a GUI as visualizing a function of type @Int -> String@ (for instance), or as visualizing a @Reactive Int -> Reactive String@. These functions on reactive values are of a restricted form, such as @fmap f@ where @f :: Int -> String@. Perhaps I can loosen up the form to involve other reactive forms as well, specifically (a) accumulation and (b) events. * Simplifying insight about GUIs: ++++ * Start with the TV MVC insight: There are two kinds of GUIs: controllers & views (input & output). Each has atomic and composite forms. In particular, a //function view// combines an argument controller and a result view. * The new insight: //every// view (not just atomic) has a "value" attribute. For a function view, the attribute is function-valued. 
When the value attribute of //either// the function view //or// the contained controller is changed, the result view's value attribute is set to the latest function value applied to the latest argument value. * Often, the situation isn't quite so simple. The views might not be simply showing a static value, say a function mapped over one or more varying inputs. Instead, it could be a more general function involving reactive values and events. For instance, there could be continuous and/or discrete forms of accumulation (e.g., integration or event-counting). In that case, use a "stateful" kind of function, such as the @EF@ notion from [[2008-01-06]]. === * Instead of merging multiple inputs via @Either@, use a curried approach. Using @(~>)@ (right-associative) for @EF@, ++++ \begin{code} a ~> b ~> c == a -> ([b ~> c], a ~> b ~> c) == a -> ([b -> ([c], b ~> c)], a ~> b ~> c) \end{code} === A view for @a ~> b ~> c@ has two parts: an controller for @a@ and a view for @b ~> c@. When @a@ arrives, do what we always do: if the resulting @[b ~> c]@ is nonempty, then install the last element. In any case, carry on with the new @a ~> b ~> c@. In other words, curried functions needn't have any special treatment. * GUIs have dynamic values //and// dynamic views. For instance, consider a TV for @f :: a -> b -> c@. The outermost TV (@:: TV (a -> b -> c)@) has a static value (@f@) and a doubly-dynamic view (dynamic in the controllers for @a@ and @b@). The next inner one (@:: TV (b -> c)@) has a singly-dynamic value and a singly-dynamic view (@b@). The innermost one (@:: TV c@) has a doubly-dynamic value (@a@) and a static view (@c@). * Try out my @a ~> b ~> c@ idea from [[yesterday|2008-01-08]] on an accumulation example, e.g., two buttons and a display of the sum of presses. * Started writing blog posts about [[Functional futures]] and [[Another take on functional reactivity]]. Got to wondering why futures aren't just lazy values. 
I had thought I couldn't then implement the current @mappend@ operation, which yields the earlier of two futures. My thinking was that I'd have to force the values to WHNF prematurely. On reflection, I think that forcing wouldn't happen until the WHNF was demanded of them anyway. I simplified the implementation, though keeping the explicit data type and a @Never@ constructor. Old representation: ++++ \begin{code} data Future a = Future (IO a) | Never \end{code} === New representation: ++++ \begin{code} data Future a = Future a | Never \end{code} === The implementation simplifies accordingly. For instance, instead of ++++ \begin{code} Future getf <*> Future getx = future (getf <*> getx) \end{code} === I have simply ++++ \begin{code} Future f <*> Future x = Future (f x) \end{code} === The other cases (involving @Never@) remain as is. By eliminating @IO@ fm the representation, the uniqueness of value is perfectly clear. It works for all of my examples in Reactive, Phooey, and GuiTV, but not Piq. Hm! * Why not eliminate the @Future@ type altogether, in favor of using lazy values directly? ++++ * The @Never@ optimizations are very important (see [[2008-01-02]]). * A precise & deterministic semantics requires more information than just the value. It needs an associated time as well, or at least some kind of temporal total ordering. === * Discussion on #haskell about how to simply & cheaply block an IO thread forever. I was using @newEmptyMVar >>= takeMVar@. sjanssen pointed out that it will throw a "thread blocked" exception, as will @atomically retry@. I tried various incantations, but the only one that worked was sjanssen's @forever$ threadDelay maxBound@.
* Realization about the problem of blocking forever mentioned [[yesterday|2008-01-09]]: if I use CPS, then I can simply terminate.  Suppose I have a continuation @k :: a -> IO o@ to run after an @IO a@.  Then blocking forever would mean @k@ doesn't get run, so all I have to do is nothing.
* Think again about a correct (and hence deterministic) implementation of functional futures. ++++
*  What kind of thing is the //time// value that accompanies a regular lazy value?  It has to support a total ordering and have a lazy structure of its own.
*  Define a type of partial information about times. ++++
\begin{code}
data TimeInfo t = Exactly t | AtLeast t
\end{code}
=== Represent an "improving time" (from Warren Burton's "improving values") as an action for computing the latest known @TimeInfo@.  The action may return different time-infos, but the answers must be monotonic, w.r.t the information ordering.
*  For improving times @t@ and @t'@, to determine whether @t <= t'@, first spin two threads racing to force the exact values of the two times.  Whichever thread wins the race kills the other thread and then asks the question of how the loser compares to the winner's exact value.
*  There are really two different questions.  If @t@ wins, with an exact value @et@, then the question is whether @t' >= et@.  If @t'@ wins, with an exact value @et'@, then the question is whether @t > et'@ (note //strictly// greater than).
*  The primitives we need then are @geT, gtT :: ITime t -> t -> Bool@.  These primitives are //semantically pure// in that they give the same result for the same arguments.  They may block for a while if the answer isn't yet knowable.
* Fixed my darcs problem on conal.net.  The darcs executable is in my {{{~/bin}}}, which was added to {{{PATH}}} in my {{{.bash_profile}}}.  That file only gets used for login shells, hence not for ssh-based commands.  The fix (thanks to shachaf on #haskell): set {{{PATH}}} in {{{.bashrc}}} instead.  Success!  Now I have my journal under darcs control, so the server copy gets updated incrementally.
* I've been noodling over how to handle pair-valued input conveniently.  I thought I wanted to use sum types, as in Fudgets, denoting an event in the first half or one in the second half of a pair.  On the other hand, I want a //pair//, not a sum, for initialization.  Magnus Carlsson looked into the mismatch between arrows and fudgets in [[ProdArrows -- Arrows for Fudgets| http://www.cse.ogi.edu/~magnus/ProdArrows]].  Here's a new idea I like a lot.  Think of the sum as encoding a way to //edit// a pair.  Then @Left a@ represents @\ (_,b) -> (a,b)@ and @Right b@ represents @\ (a,_) -> (a,b)@.  In other words @first (const a)@ and @second (const b)@.  This latter rewriting suggests a nice generalization.  Instead of streams of values, each //replacing// the previous, consume and produce streams of edit functions, each //modifying// the previous.  And define the edit functions compositionally, e.g., using [[DeepArrow| http://haskell.org/haskellwiki/DeepArrow]].  If this idea works out, its efficiency benefit could go much further than just protecting unchanging tuple members from changing tupler members.  Even for a single atomic value, I might be able to get incremental computation.  "Compositional incremental computation".
* Getting a better ATI video driver installed on my Ubuntu computer. ++++
*  Downloaded a big driver set-up from ATI, but it [[didn't work| http://lists.debian.org/debian-user/2007/08/msg00868.html]].
*  Experimented with [[xrandr| http://wiki.debian.org/XStrikeForce/HowToRandR12]] settings.  Found this combination: ++++
{{{
xrandr --auto
xrandr --output VGA-0 --left-of LVDS
xrandr --output VGA-0 --rotate left
}}}
=== That basically does what I want, but the quality is terrible.  My external monitor shows streaky text, and update is very slow.  Try again later.  Even with just a single head, 3D graphics runs considerably slower.  Experiment with driver settings to see if I can improve the performance.  If not, consider switching back to the proprietary ATI driver.
===
* More blog writing. ++++
*  Using [[Markdown| http://daringfireball.net/projects/markdown]].
*  I gave up on Blogger, where I've been keeping my [[technical blog| http://conal-elliott.blogspot.com]].  It removes one leading space per line of pre-formatted html on every edit/preview cycle.
*  Set up a [[new blog| http://conal.net/blog]] with WordPress.
*  Found a post on [[Syntax highlighting with Markdown in WordPress| http://www.dougalstanton.net/blog/index.php/2007/12/15/syntax-highlighting-with-markdown-in-wordpress]].  It uses a combination of [[PHP Markdown Extra| http://michelf.com/projects/php-markdown/extra]] and [[GeSHi| http://qbnz.com/highlighter]] (Generic Syntax Highlighter), plus a small extension to specify which language for a block of code.  It works great.  I tweaked the PHP code to make the default language be Haskell instead of txt.  The GeSHi-decorated code even has links to library documentation, which I love.
*  I had a bit of trouble along the way and got help on the #wordpress IRC channel.  It turned out that I hadn't followed the installation directions for PHP Markdown Extra, which say to move markdown.php out of its directory directly into my plugins directory.  Once I knew to look in the Apache error log, it was pretty easy to track down the problem.
*  Still to do: ++++
*   Better blog name & url.
*   Different theme.  I like horizontally stretchy ones.
*   When I want to go public, get Haskell Planet to point to the new blog.
===
===
* Blog obsession: ++++
*  I think I like this permalink structure: {{{/%year%-%monthnum%/%postname%}}}.  Then URLs are fairly informative, and I don't have to worry much about unique titles.
*  [[WordPress 2.3| http://codex.wordpress.org/Version_2.3]] has both //tags// in addition to //categories//.  I've been using categories liberally as tags.  Some themes show tags, and some don't.  Find one that does.  There's a category->tag converter under Manage/Import.  Be careful not to convert "Uncategorized".
*  There doesn't seem to be a command to rename a mysql database.  To delete it, do {{{drop database <dbname>}}}.
*  Christophe P (vincenz) has a fairly nice [[Haskell blog| http://notvincenz.blogspot.com/2007/07/higher-order-zippers.html]] in Blogger.  He writes straight html into a .lhs, pastes math bits in from [[wikybox| http://goessner.net/articles/wiky]], and runs the result through hscolour.
*  On WP's Options/Reading: "Note: If you use the {{{<!--more-->}}} feature, it will cut off posts in RSS feeds."
*  [[How To Add Wordpress 2.3 Tags To Your Current Theme| http://richgilchrest.com/how-to-add-wordpress-23-tags-to-your-current-theme]].  I added a tag list and (for now) hid the categories in index.php and single.php, in the fluid-blue-10 theme.  I also tweaked the sidebar widget list in Presentation/Widgets.
===
* Exchanged email with Levi Stephen, who's implementing a web-server (in part) with my Reactive library.  I simplified & cleaned up his code.  Still some more fundamental rethinking to do.
* Talk title: "Rescuing Functional I/O".  (Also "rescuing" & "rehabilitating".)  Simple idea: return to the old stream model for I/O, but using functional events in place of streams.  Benefit over monadic I/O: tractable semantics for reasoning.  Benefit over old stream I/O: static typing, clarity, ease of use, composability.  (Guesses.)

* Ivan Tomac sent a bug report for Reactive.  switcher gave an infinite loop.  I'd suspected my definition wasn't well-founded, but I guess I fooled myself into thinking it was tested.  I broke the definition cycle with an elegant definition of join for Reactive, and did a darcs push as well as releasing a new version (0.2) on hackage.  The definition: ++++
\begin{code}
-- Reactive 'join'
joinR :: Reactive (Reactive a) -> Reactive a
joinR ((a `Stepper` Event fut) `Stepper` e'@(Event fut')) =
a `stepper` Event fut''
where
-- If fut  arrives first, switch and continue waiting for e'.
-- If fut' arrives first, abandon fut and keep switching with new
-- reactive values from fut'.
fut'' = fmap (switcher e') fut `mappend` fmap join fut'
\end{code}
===
* NVC-Cert note: "Out Beyond Certification ...".
* Emacs-futzing for markdown editing (mmm).
* What do I really want to say in blog posts about reactive values? ++++
*  Working title "Reactive values from the future"
*  Briefly explain the meaning of reactive values and events.
*  Informally explain class instances for @Reactive@ and @Event@.
*  Stepper and Switcher.
*  Continuous reactive behaviors.
*  Reactive normal form.
===
* Blogging: Fixed the [[You don't have permission to do that| http://blog.fyais.com/2007/04/15/you-don%E2%80%99t-have-permission-to-do-that-another-solution/]] problem.
* NVC-cert note "Getting past helplessness -- a co-creative opportunity".
* sjanssen used kolmodin's hinotify to make [[a small mail notification script| http://hpaste.org/5070]].  His version is imperative throughout.  I bet it could be given an elegant functional formulation using Reactive.
* Replied to Pierre-Evariste Dagand about his FRP+arrow-based network overlay paper.
* Finished draft of blog post "Reactive values from the future".  It's rather long, and I think I'd like to split it into one on interface & implementation and a second one on the implementation, which is also what I did for future values.
* Replied to Levi Stephen about functional reactive web programming.  An excerpt: ++++>
I'm looking for applications of the Monad interface to events.  I think it's a very powerful tool for doing dynamic/branching things, and I strongly suspect there are compelling network applications.  In particular, join handles event-valued events.  For instance, the event of forming a connection to a web server might yield a new event for each new connection.  Each new event corresponds to the stream of requests for a given connection.  I haven't thought much about networking, and I'm wondering what kinds of applications would take multiple requests per connection.  Maybe I'm mixing up "connections" and "sessions".  And maybe I'm thinking too specifically here about one "connection".  Perhaps another example is user registration, which then yields a new user-specific event (stream of requests from that particular user).
===
* Think about a session example, say a multi-step interaction with cumulative "state" (which, btw, was John Hugues's prime motivating example for arrows). ++++
*  A little HTML UI with a button and a display saying how many times the user has pressed the button.  Of course, each user has a separate counter.
*  Add a display for the current average across all users.
*  Replace the counter with a thumbs-up/thumbs-down control (+1/0/-1) and a running total across all users, as in reddit.
===
* Replied to Pierre-Evariste about his "functional-reactive overlays" work.
* Working on my reactive value blog posts.  I discovered a lovely pattern in the denotational semantics of functional reactivity.  The meanings of the class methods are very simply defined in terms of the same methods on the meanings of the method arguments. ++++
\begin{code}
me :: Event a -> ([] :. (,) Time) a

instance Functor Event where
me (fmap f e) = fmap f (me e)

instance Applicative Event where
me (pure a)    = pure a
me (ef <*> ex) = me ef <*> me ex

me (return a) = return a
me (join ee)  = join (fmap me (me ee))

mr :: Reactive a -> (Time -> a)

instance Functor Reactive where
mr (fmap f r) = fmap f (mr r)

instance Applicative Reactive where
mr (pure a)    = pure a
mr (rf <*> rx) = mr rf <*> mr rx

mr (return a) = return a
mr (join rr)  = join (fmap mr (mr rr))
\end{code}
=== This spec is structured after Haskell instance declarations, but it isn't legal Haskell code.  Note the somewhat obscure reformulation of @(Time,a)@ in order to leverage the instances for type compositions.  To do: make a new module @Data.SReactive@ and a set of QuickCheck properties that relate the implementation to the semantics.
* For the semantics of events, try replacing the list of futures with something like @newtype SEvent a = SEvent (Future (a, SEvent a))@.  I think then I'll get the time-sorting by construction instead of by constraint.
* Mulling over yesterday's realization about the structure of semantics for events and reactive values.  Suppose I have a @Semantics@ type class to capture the pattern.  Or one class per arity of type constructor. ++++
\begin{code}
class Semantics rep sem | rep -> sem where
meaning :: rep -> sem

class Semantics1 rep sem | rep -> sem where
meaning1 :: forall a. rep a -> sem a

-- ...
\end{code}
=== Require meaning functions to be homomorphic over standard class methods.  Here's a spec based on instance declarations. ++++
\begin{code}
instance (Semantics rep sem, Monoid sem) => Monoid rep where
meaning mempty           = mempty
meaning (r `mappend` r') = meaning r `mappend` meaning r'

instance (Semantics1 rep sem, Functor sem) => Functor rep where
meaning (fmap f r) = fmap f (meaning r)

instance (Semantics1 rep sem, Applicative sem) => Applicative rep where
meaning (rf <*> rx) = meaning rf <*> meaning rx

meaning (return a) = return a
meaning (join rr)  = join (fmap meaning (meaning rr))
\end{code}
=== I could write these specifications as Haskell rules and/or QuickCheck tests.  Perhaps there's also a nice connection with the [[wrapper/worker transformation| http://unsafeperformio.com/paper.php?id=13]].  The instance specs above say that whenever the semantics is an instance, the representation must be also.  A weaker requirement would be that *if* the semantics and representation are both instances of the class, *then* the homomorphisms hold for the class's instances.
* What about @(>>=)@?  Calculate it: ++++
\begin{code}
meaning (r >>= f) = meaning (join (fmap f r))
= join (fmap meaning (meaning (fmap f r)))
= join (fmap meaning (fmap f (meaning r)))
= join (fmap (meaning.f) (meaning r))
= meaning r >>= meaning . f
\end{code}
=== Or calculate in reverse: ++++
\begin{code}
meaning (join rr) = meaning (rr >>= id)
= meaning rr >>= meaning . id
= meaning rr >>= meaning
= join (fmap meaning (meaning rr))
\end{code}
=== I like the look of the @meaning (r >>= f)@ definition and of the derived meaning of @join@ in terms of @(>>=)@.
* The @Monoid@ and @Monad@ specs for @Event@ don't work, because the semantic domain lacks the required @Monoid@ and @Monad@ instances.  Conjecture: there's a better semantics that does have the required instances, and those instances do exactly the right thing.  Start with @Monoid@.  If the model is a list, then @mempty@ (i.e., @[]@) will work fine but @mappend@ (i.e., @(++)@) won't at all be what I want.  How about some kind of @MergeList@ or @MergeStream@ that presumes ordered elements?  Or maybe just @Stream@, which doesn't have a @Monoid@ instance.  Or maybe I want a different structure.  Instead of a stream of time/value pairs, where a stream is an element and a stream, perhaps have a time and a value+stream. ++++
\begin{code}
newtype Event a = Future (a, Event a)
\end{code}
=== Or simply ++++
\begin{code}
newtype Event a = Future (Reactive a)
\end{code}
=== as in my current representation.  Hm.  While @Future@ is a monoid, it's not the monoid I want.
* Have another go at a semantically direct representation for events: as a list of future values.  Not quite a list, but rather a *merge-list*, a @newtype@-wrapping around a list, with a @Monoid@ instance that merges. ++++
\begin{code}
instance Monoid (Event a) where
mempty  = Event mempty
Event fut `mappend` Event fut' = Event (fut `merge` fut')

-- | Merge two 'Future' streams into one.
merge :: Future (Reactive a) -> Future (Reactive a) -> Future (Reactive a)
Never `merge` fut   = fut
fut   `merge` Never = fut
u     `merge` v     =
(onFut (`merge` v) <$> u) `mappend` (onFut (u `merge`) <$> v)
where
onFut f (a `Stepper` Event t') = a `stepper` Event (f t')
\end{code}
===
* Some Reactive thoughts: ++++
*  My @Future@ type is like @Maybe@ and could probably be replaced by @Maybe@, with @Never@ replaced by @Nothing@.  Check the instances.
*  The @Never@ (or @Nothing@) could be moved out of @Future@ into @Event@.
===
* A bit more correspondence about certification & needs.
* Title: "Can functional programming be liberated from the von Neumann style?".  Perhaps as a co-title with "Rescuing functional I/O".
* Working on my Reactive blog posts.  Notes: ++++
*  Think about special monoid-related functions.  Combine reactive values with @mappend@.  When values quiesce to @mempty@, remove them.
*  References must come before the {{{<!--more-->}}}, or they'll be lost to the teaser.
*  The feed came out in a truncated, text-only form.  Yuck.  I changed the RSS config from "summary" to "full text".  I hope that fixes the problem.  It did!
===
* I meant to write about @joinMaybes@ in a blog post.  Did it: [[A handy generalized filter| http://conal.net/blog/posts/a-handy-generalized-filter/]].
* I think I can reformulate my event and reactive value types in a more straightforward way, as I was exploring last week.  Represent an event as a list of time/value pairs, in which the times are monotonically non-decreasing.  Use list finiteness rather than @Never@ for non-occurring events.  The one tricky question is how to compare the times without forcing them.  Feed primitive events from Concurrent Haskell channels, using @getChanContents :: Chan a -> IO [a]@.  Can [[mergeIO| http://www.haskell.org/ghc/docs/latest/html/libraries/base/Control-Concurrent.html#v%3AmergeIO]] help?
* I also want to get past this basic model of events & reactive values to an explicit arrow approach.  I think the multi-threading and indeterminacy will vanish.  The interesting bit is probably curried arrows.  Puzzle: do I have something like both events and reactive values or just one?
* Thinking about Levi's suggested example of a simple blog server.  How could one be formulated in a very functional way? ++++
*  The blog database is a varying value, defined in terms of a stream of modification requests and sampled via streams of queries.
*  The modification stream results from the interleaving of many streams.
*  The individual streams might correspond to sessions or to users.
*  Consider a query stream as containing functions to be applied to the varying blog.  It's a combination of @snapshot@ and function application: @uncurry ($) <$> (query snapshot blog)@.  It's a very important implementation detail that the function application happen on the server, to reduce transmission.
===
* Thoughts on simpler Reactive implementation: ++++
*  My goal is for the implementation to match the semantics as closely as possible: a future is a time/value pair, and an event is a list of futures.
*  The remaining puzzle is how to compare a future time with a concrete time value, without having to force the future time.
*  Idea: have the representation of a future time include a function that compares with a concrete time value.  For a primitive event (from @mkEvent@), the comparison function would first see if the actual time is known.  If so, it uses that time and does a regular comparison.  Otherwise, it uses its current notion of time.  If its current time is later than the concrete query time, then it can answer immediately.  Otherwise, it blocks until it can answer.  How does an event get a notion of current time?  It delegates to a shared clock.
*  Hm.  Still doesn't sound quite as simple as I'd like.
===
* Get to work on the next version of reactivity, based on some transformation types (probably arrows). ++++
*  One possibility is as a function that reacts to an input by producing zero or more outputs and a residual reactive transformation. ++++
\begin{code}
newtype ETrans a b = ETrans (a -> ([b], ETrans a b))
\end{code}
=== Or drop the new residual: ++++
\begin{code}
type ETrans' a b = a -> [b]

type ETrans = Kleisli []
\end{code}
=== With this latter definition, we get the arrow for free.  Is the residual @ETrans@ just an optimization?  I've forgotten.  Could it be layered on as some kind of arrow transformer, or perhaps an alternative to @Kleisli@?
*  I think @ETrans a b@ is an implementation of a commonly used subset of @Event a -> Event b@.  What about @Reactive a -> Reactive b@, or isomorphically, @(a,Event a) -> (b,Event b)@?  The initial @b@ would depend only on the initial @a@.  I'm not sure about the event part.  Try limiting it to depend only on the source event. ++++
\begin{code}
data RTrans a b = RTrans (a -> b) (ETrans a b)
\end{code}
=== Hey!  This arrow is a pair of arrows: ++++
\begin{code}
type RTrans = (->) ::*:: ETrans
\end{code}
=== @TypeCompose@ defines @(::*::)@ and its @Arrow@ instance.  Note in particular that @arr f@ is exactly what I want: apply @f@ to the initial value and to each new value.
*  Add residuals as an arrow transformer: ++++
\begin{code}
newtype Resid (~>) a b = Resid (a ~> (b, Resid (~>) a b))

instance Arrow (~>) => Arrow (Resid (~>)) where
arr h = r where r = Resid (arr (\ a -> (h a, r)))
first (Resid f) = Resid $ first f >>> arr (\ ((b,r),c) -> ((b,c), first r))
Resid f >>> Resid g = Resid $
proc a -> do
(b,rab) <- f -< a
(c,rbc) <- g -< b
returnA -< (c, rab >>> rbc)
\end{code}
===
*  Next, tweak to use //optional// residuals: ++++
\begin{code}
-- Add optional residual.  A Nothing means that the old arrow value didn't change.
newtype ResidMb (~>) a b = ResidMb (a ~> (b, Maybe (ResidMb (~>) a b)))

instance Arrow (~>) => Arrow (ResidMb (~>)) where
arr h = ResidMb (arr (\ a -> (h a, Nothing)))
first (ResidMb f) = ResidMb $ first f >>> arr (\ ((b,mbr),c) -> ((b,c), fmap first mbr))
ab@(ResidMb f) >>> bc@(ResidMb g) = ResidMb $
proc a -> do
(b,mbrab) <- f -< a
(c,mbrbc) <- g -< b
returnA -< (c, rebuild2 (>>>) ab bc mbrab mbrbc)

rebuild2 :: (a -> b -> c)
-> a -> b
-> (Maybe a -> Maybe b -> Maybe c)
rebuild2 _  _ _ Nothing Nothing = Nothing
rebuild2 f a b mba mbb =
Just (f (fromMaybe a mba) (fromMaybe b mbb))
\end{code}
===
*  Now some simple GUIs.
===
* Wrote [[blog response| http://conal.net/blog/posts/blending-continuity-into-reactive-values/#comment-25]] about synchronous computation in Reactivity.  The main point: "While my push implementation looks like it performs unnecessary recomputation, it needn’t, thanks to laziness."
* Idea for a nice FRP demo: something related to [[dynamic social networking relationships in IRC| http://files.codersbase.com/haskell/haskell-current.png]].
* Wrote [[another blog response to Vladimir| http://conal.net/blog/posts/blending-continuity-into-reactive-values/#comment-27]].  Contains some thoughts on integration: ++++
*  Integrate piecewise, i.e., integrate each non-reactive time function, and keep a running sum of the results.  As you noted, we have to know when the switch happens.
*  Exploit the Fun representation to integrate exactly in the special case of constant functions.  Probably extend the representation so additional cases can be integrated exactly.  Perhaps univariate polynomials.
*  What numeric integration method to use?
*  Adapt step size to available cycle.  For instance, give each integral a thread that repeatedly takes a step and yields.  Each time delta would be measured as the current (thread wake-up) time minus the time of the previous step.
*  I wonder how integration threads would interact with my idle trick.  Currently, my threads are all blocked most of the time, but the new integration threads simply yield while still runnable.  Would the new active threads over-compete with the single GUI thread handling all of the outputs?
===
* Recursive rebuilding (as in optional residue): ++++
*  @rebuild2@ from [[2008-01-27]] took in @a@ and @Maybe a@ but only used the @a@ if the @Maybe a@ was @Nothing@.  Ditto for @b@.  So instead, pass in a value and a flag saying whether the value is old or new.  I'd like to see how to make an applicative functor, so try application: ++++
\begin{code}
data RBld a = RBld a Bool  -- Bool is whether new.
rebuildApp :: b -> RBld (a -> b) -> RBld a -> RBld b
rebuildApp b (RBld _ False) (RBld _ False) = RBld b     False
rebuildApp _ (RBld f _    ) (RBld a _    ) = RBld (f a) True
\end{code}
=== Still not quite in the form of an applicative functor, because of the passed-in @b@.  Hm.  What if I computed @b@ even when we could use the old one?  Oh well.  Keep tinkering.  Try a little rewriting application.
===
* Reactive transformations.  ++++
* First an arrow transformer that adds a residual.  (There's a variation with an optional residual.) ++++
\begin{code}
newtype Resid (~>) a b = Resid (a ~> (b, Resid (~>) a b))
\end{code}
=== Then event transformation.  Isomorphic to a -> [b] plus residual.  Each input yields zero or more outputs and a residual. ++++
\begin{code}
type ETrans = Resid (Kleisli [])
\end{code}
=== Then simply pair up for a reactive value transformation.  Transforms initial value and events independently. ++++
\begin{code}
type RTrans = (->) ::*:: ETrans
\end{code}
=== Lovely!
*  Next, what can I build?  Work toward a simple GUI with one input and one output.  Then add more inputs, both paired and curried.  My current TV guis are all on pure values, including pure functions, so I don't need any of the fancy stuff above.  Do an example with state.  A button that increments a displayed counter.  Hm.  That example is neither @ETrans@ nor @RTrans@, but rather something in between.  Oh -- have it take an initial counter value instead of wiring in zero.  Oh, oops -- the initial value will influence the first reaction, so the neat separate above (@(::*::)@) won't work.
*  Instead, I want the initial input to influence both the initial output and the @ETrans@. ++++
\begin{code}
newtype RTrans a b = RTrans (a -> (b, ETrans a b))
\end{code}
===
*  Hmm.  This new @RTrans@ is //awfully// close to @ETrans@.  The only difference is that exactly one output is generated from the initial input.  I could unify them by relaxing the @b@ to @[b]@.  Then I'd have the unification I've been puzzling about between events and reactive values.  That way I don't have to have two kinds of transformations and two kinds of interfaces to them.  Try that.  No more @RTrans@.  Then simply ++++
\begin{code}
type ETrans = Resid (Kleisli [])
\end{code}
=== or @ResidMb@ in place of @Resid@.
*  Back to the example, now as an @ETrans@.  Where does the counter state live?  Invisibly in the @ETrans@, which is why I'll need residuals in this example. ++++
\begin{code}
-- handy utilities
etrans :: (a -> ([b], ETrans a b)) -> ETrans a b
etrans = Resid . Kleisli

etrans_ :: ([b], ETrans a b) -> ETrans a b
etrans_ = etrans . const

-- simple counter
counter :: Int -> ETrans () Int
counter next = etrans_ ([next], counter (next + 1))
\end{code}
=== Must I use a parameter?  Could I get the state to somehow ride along in the inputs and outputs?  Could I define a @ArrowLoop@ instance and somehow use @loop@? ++++
\begin{code}
class Arrow (~>) => ArrowLoop a where
loop :: (b,d)~>(c,d) -> b~>c
\end{code}
=== In particular, @loop :: ((),Int)~>(Int,Int) -> (()~>Int)@.
*  How would I want @loop@ to work?  Probably take in the previous @d@ output as the new @d@ input.  But then what about the first @d@ in?  Maybe some kind of primer.  Without, we have ++++
\begin{code}
counter :: ETrans () Int
counter = loop $ etrans $ \ ((),n) -> first pure (dup (n+1))
\end{code}
=== Hm.  Come back to this question later.
===
* The [[arrows page| http://www.haskell.org/arrows]] has examples, including stream transformers, simple automata, and Fudgets-style stream processors.  About automata, "another model of dataflow languages uses the type": ++++
\begin{code}
newtype Auto b c = Auto (b -> (c, Auto b c))
\end{code}
===
* Curried ("higher dimensional"?) automata relates to livecoding.  Given @a :-> b :-> c@, when @a@ occurs, I want a new @b :-> c@, and I want it to start running where the old @b :-> c@ left off.
* I think of @a :-> b@ as an efficient representation of @[(Time,a)] -> [(Time,b)]@.  Explicitly state the interpretation of @a :-> b@ and then look at curried examples.  Hm -- there's no notion of time in @a :-> b@.  I could add it.  Or find a more abstract variant of @(Time,a)@.  Semantic function: ++++
\begin{code}
meaning :: (a :-> b) -> [(t,a)] -> [(t,b)]
meaning _ [] = []
meaning (Resid (Kleisli h)) ((t,a) : tas) =
map (t pair) bs ++ meaning etr' tas'
where (bs,etr') = h a
\end{code}
===
* Aside from possible use in continuous behaviors, time is just for ordering event occurrences.  In particular, it gives us a way to merge multiple input or output streams.  Still, it may be unnecessary, unnatural, and unimplementable in general.  We physical beings resolve the problem of multiple input streams by having the streams transmit across a (complex) medium.
* What classes have instances for @[(t,a)] -> [(t,b)]@ and which for @a :-> b@?  Require the latter instances to be consistent with the former (via the @meaning@ function).  Monoid & Functor look easy enough.  How about Applicative?  What would @pure b@ be?  In the semantic model, @pure b = (pure.pure) (minBound,a)@.  I don't think that meaning can be expressed in the implementation model, since it "reacts" without any stimulus.  What about @(<*>)@?  Easy in the semantics, but I how in the implementation?  I guess I could accumulate all function occurrences and all argument occurrences as I go.  When a function occurs, yield applications of it to all arguments so far.  When an argument occurs, yield applications of it by all functions so far.  I think that'd work.  Monad?  Same problem for @return@ as @pure@.  I think @join@ would work out fine but require remembering the entire past input stream to feed to the new bots.  Hm -- what semantics do i want?  I could decide that new bots respond only to input events since their creation.  But then I doubt that @ap == (<*>)@.  Hm.  I'd like to keep the expressiveness of the @Event@ monad, and I don't see how.
* Okay, so @a :-> b@ represents (some of) @Event a -> Event b@?  What about @Event a -> Event b -> Event c@, or other combinations like @(Event a, Event b) -> Event c@?  I know that @(Event a, Event b) =~= Event (a + b)@, and so @Event a -> Event b -> Event c =~= Event (a + b) -> Event c@.  How do I use those facts?
* How to kick an IRC spammer: +++
{{{
<conal> Cale: would you mind telling me what steps you took to kick AngelinaJolie26?  i have the op
permission but not the know-how.						      [23:20]
<conal> Cale: or maybe point me to a how-to page
<Cale> I did /cs op #haskell
<Cale> which in my IRC client gives me ops
<Cale> (the /cs command is bound to sending chanserv a message)
<shachaf> conal: (/msg chanserv could also work.)					      [23:21]
<Cale> and then  /mode #haskell +b AngelinaJolie26
<dblhelix> couldn't it be that she's just interested in Haskell? ;-)
<Cale> That's just a nick ban, so it could change its nick and get back in, but it's unlikely to do
that.
<Cale> (seeing as it looks like a crapflooding robot, given the quits due to excess flood)    [23:22]
<jua> nick!ident@host ? is . * is .*
<jua> wouldnt the bot almost surely hcange its name? heh
<Cale> Most bots are not that smart :)							      [23:23]
<dblhelix> regarding policy: what about bots written in haskell? shoud we cut those some slack or
what? ;-)
<conal> Cale: and to release ops?
<Cale> Well, I usually do that through the menus in my client, but it's /mode #haskell -o Cale
<conal> Cale: thanks.  notes stashed.
}}}
===
* Cool web post from Feb 2007: [[Haskell and Web Applications| http://www.defmacro.org/ramblings/haskell-web.html]].  Uses the @Data@ class to automate rendering of Haskell data types into html.
* It's my birthday.
* Arrow-styled reactivity: ++++
*  Maybe I can use @[i] -> [o]@ after all, with the assumption that each input triggers exactly one output.  Or really @Stream i -> Stream o@.  Use the inherited monoid, which requires @o@ to be a monoid.   Then, e.g., @o@ could be @Maybe b@ or @[b]@.  For convenience, perhaps wrap up the composition into a new type, deriving almost all of its implementation.
===
* Writing NVC Evolves post "Distracted by faux needs?"
* Infix expression hack: +++
{{{
<ddarius> conal: I'm sure you are aware of the various "infix expressions" hacks
<EvilTerran> f <|fmap.first.fmap|> x
<conal> i'd forgotten.  i like the look of it.  what are the defs & fixities?
<EvilTerran> infixl 1 <|, |>; (<|) = flip ($); (|>) = ($) -- this works
<conal> EvilTerran: thanks.  i'll try it out in my programming.  i have a lot of things like (fmap.fmap).
<EvilTerran> if you make 'em infixr and tweak the definitions a bit, you can make "right sections" work too
<EvilTerran> (with that version, only left sections work)				      [12:45]
<conal> EvilTerran: btw, who came up with that nifty hack?
<EvilTerran> (as in, (f <|fmap.fmap) does what you expect, but (fmap.fmap|> x) doesn't)
<EvilTerran> er... me, as far as i know :)
<ddarius> conal: It was on the mailinglist many many years ago.
<conal> EvilTerran: thx for the warning
<EvilTerran> but someone else almost certainly thought of it first
}}}
===
* "Console FRP".  How can someone write a console program (text in & out) with FRP style and semantics?
* Hm: ++++
\begin{code}
class Arrow (~>) => ArrowApply (~>) where
app :: (b ~> c, b) ~> c
\end{code}
===
* Here's an example ++++
{{{
<Saizan> conal: first example i could think of: f xs = "Insert a string:\n" ++
let s = takeWhile (/= '\n') xs in seq (length s)
"Here is your string in uppercase:\n" ++ map toUpper s ++ "\n"
<conal> Saizan: thx.									      [13:58]
<conal> Saizan: btw, why the seq?  does it correspond to something implicit in an imperative
formulation?
<Saizan> conal: with no seq "Here is your string in uppercase:" would be printed before the user
inserted the string, also, each character would be immediatly repeated in uppercase
<Saizan> conal: having to tweak this things with seq it's most of the weirdness
<conal> Saizan: ah!!  tricky!  thanks again.  :)
<conal> Saizan: exactly the kind of thing i'm looking for.
}}}
===
* Oh, hey!  The @Applicative@ semantics for event-bots (stream transformers and output+residual) has become synchronous in both reps.  I didn't realize that.  With just events, I couldn't align functions and arguments, so I did a cross product.  With my current bots (transformations), the alignment is in the representation.  I guess I'll have the stream elements themselves be lists, and I'll form the cross-products of corresponding pairs of lists.  If either is empty (non-occurrence), the product is empty.  If both are singletons (unique), the product is.
* Oh.  The listy bot representation doesn't do what I thought.  Definition: ++++
\begin{code}
newtype Resid (~>) a b = Resid (a ~> (b, Resid (~>) a b))

type (:->) = Resid (Kleisli [])
\end{code}
=== I wanted one input to generate a list of outputs and exactly one residual.  Instead, I get a list of output/residual pairs.  Oh hey, I think I get a nondeterministic bot, with branching at every transition.
* Blog post: [[Distracted by faux needs?| http://evolve.awakeningcompassion.com/?p=24]].  Note to NVC-cert group.
* The residual transformer is in {{{Control.Arrow.Transformer.Automaton}}}.  Ross has lots of functionality there, including @ArrowLoop@ and @ArrowCircuit@.  The latter introduces @delay@. ++++
\begin{code}
class ArrowLoop (~>) => ArrowCircuit (~>) where
delay :: b -> (b ~> b)
\end{code}
=== Hm.  Is there anything left to do after @Automaton@?
* newsham on #haskell pointed me to [[a video| http://video.google.com/googleplayer.swf?docId=-7171501518308861626]] on [[MathMap| http://www.complang.tuwien.ac.at/schani/mathmap/]], which has some similarity to Pan & Eros.  It's sort of like Gerard Holzmann's language (Digital Darkroom) and like Filter Factory, but with a boxes-and-arrows editor for composition.  Seems to work pretty well.  The [[language| http://www.complang.tuwien.ac.at/schani/mathmap/language.html]] and interface look pretty ad hoc, in treating images specially rather than just one type within a general, consistent design.  For instance, there's a "filter" keyword and special identifiers "xy", "r".  I think the only types are booleans, numbers, tuples, and images.
* My "faux needs" post has generated some attention, as has Emergence.
* Idea for arrows & reactivity post: introduce the stream-function rep, and then explain the correlation problem as motivation for the residual-bot rep.
* Ross's arrows library also has a stream arrow transformer: ++++
\begin{code}
newtype StreamArrow a b c = Str (a (Stream b) (Stream c))

instance Arrow a => Arrow (StreamArrow a) where
arr f = Str (arr (fmap f))
Str f >>> Str g = Str (f >>> g)
first (Str f) =
Str (arr Stream.unzip >>> first f >>> arr (uncurry Stream.zip))
\end{code}
=== which is equivalent but more general than I'd written (replacing @(->)@ with an arbitrary arrow).
* Post: [[Literate Haskell Blogging| http://olsner.se/2008/02/05/literate-haskell-blogging/]].  I folded in olsner's patch to {{{markdown.php}}} and removed one comment character, to remove the leading "{{{> }}}".
* Instead of "residual bots", call them "mutant bots", because they mutate after each reaction.
* I just discovered that @Monoid@ and @MonadPlus@ work differently on @Maybe@ values.  Monoid requires that the type parameter be a monoid also. ++++
\begin{code}
> Just "foo" `mappend` Just "bar"
Just "foobar"
> Just "foo" `mplus` Just "bar"
Just "foo"
\end{code}
=== I was explaining that the @Maybe@ monoid does a biased choice for choosy bots, but that's not the case.  EvilTerran noted that "MonadPlus Maybe matches up with Monoid (First a)".  I switched from @Maybe@ to @First@ for choosy-bots.
* At I(An)Ok's reminding, sent a reply to Margo P about certification.
* What else do I want to say in my blog post (or follow-ons)? ++++
*  Reactive stuff: accumulation, filtering, snapshot.  For snapshot, I think I'll want a notion of varying values.
*  Examples in graphics or guis.
===
* Here's a thought on prompt&input text interfaces: the human user is the reactive system.  Prompts are the inputs and the user's responses are the outputs.  Consider a classic Mealy automaton, unwound: ++++
\begin{code}
a -> (b, a -> (b, a -> (b, ...)))
\end{code}
=== After one input, we get a single output and a residual automaton: ++++
\begin{code}
(b, a -> (b, a -> (b, ...)))
\end{code}
=== However, we can also think of this latter type as a prompt and a way to process the input.  So, let's refactor a bit: ++++
\begin{code}
newtype GetPut a b = Get (a -> PutGet a b)
data    PutGet a b = Put  b   (GetPut a b)
\end{code}
===
* Compare these @Get@ and @Put@ with stream processors as described on [[the arrows page| http://haskell.org/arrows/]]: +++>
''Fudgets-style stream processors'': These are processes that can decide whether to input or output at each step:
\begin{code}
data SP a b = Put b (SP a b) | Get (a -> SP a b)
\end{code}
These may be cast as arrows, but are usually used as dual arrows.
=== The difference is that Fudgets-style stream processors can either get or put at each step, while I'm requiring a strict alternation.  Consider Saizan's examples from [[2008-02-02]]: ++++
\begin{code}
f xs = "Insert a string:\n" ++
let s = takeWhile (/= '\n') xs in seq (length s)
"Here is your string in uppercase:\n" ++ map toUpper s ++ "\n"
\end{code}
=== Now with monadic IO: ++++
\begin{code}
f = do putStrLn "Insert a string:"
s <- getLine
putStrLn $ "Here is your string in uppercase:\n" ++ map toUpper s
\end{code}
=== The @SP@ and separated @Get@/@Put@ formulation are identical: ++++
\begin{code}
\ k -> Put "Insert a string:" $
Get $ \ s -> Put ("Here is your string in uppercase:\n" ++ map toUpper s) $
k
\end{code}
=== The @k@ argument is a continuation of type @GetPut String String@ or @SP String String@.	In this case, I'm not allowing it access to the string @s@.
* Perhaps @GetPut@ is to events as @PutGet@ is to reactive values.  In Reactive, a reactive value is a value plus an event.
* Wordpress: ++++
*  {{{wp-includes/query.php}}} lists (URL) query types in the fill_query_vars function.  (Thanks to fitztrev for the tip.)  There are also [[some query types on the codex| http://codex.wordpress.org/Linking_Posts_Pages_and_Categories]].  The query {{{?p=*}}} gives all posts.
===
* Correspondence with Peter V about Eros & related.
* The @GetPut@ and @PutGet@ types from yesterday are applicative functors. ++++
\begin{code}
newtype GetPut a b = Get (a -> PutGet a b)
data    PutGet a b = Put  b   (GetPut a b)

instance Functor (GetPut a) where
fmap f (Get h) = Get (fmap f . h)

instance Applicative (GetPut a) where
pure b = Get (const (pure b))
Get h <*> Get k = Get $ \ a -> h a <*> k a

instance Functor (PutGet a) where
fmap f (Put b g) = Put (f b) (fmap f g)

instance Applicative (PutGet a) where
pure b = Put b (pure b)
Put f pf <*> Put x px = Put (f x) (pf <*> px)
\end{code}
=== We could also write @GetPut@ instances in a more general form, working when @(->) a@ (function from @a@) is replaced by an arbitrary applicative functor: ++++
\begin{code}
instance Functor (GetPut a) where
fmap f (Get h) = Get ((fmap.fmap) f h)

instance Applicative (GetPut a) where
pure b = Get ((pure.pure) b)
Get h <*> Get k = Get $ liftA2 (<*>) h k
\end{code}
=== Oh -- now the implementations look very familiar indeed.  This code always crops up when two functors or two applicative functors are composed.  If we make the composition explicit, then we get the @Functor@ and @Applicative@ instances automatically.  We could write ++++
\begin{code}
type GetPut a = (->) a :. PutGet a
\end{code}
=== What about @PutGet@?  If we cross our eyes, we can almost see two @fmap@ applications for @fmap@ and two @(<*>)@ applications for @(<*>)@.  To really see it, replace @b@ with @Id b@.  For a more standard form, use pairing instead of a two-argument constructor. ++++
\begin{code}
type PutGet a b = (Id b, GetPut a b)
\end{code}
=== Now we can rewrite as pairing of type constructors: ++++
\begin{code}
type PutGet a = Id :*: GetPut a
\end{code}
=== By making the type constructor pairing explicit (as we made composition explicit in @GetPut@), we can now get the @Functor@ and @Applicative@ instances for free, from those instances on @f :.: g@.  One hitch is that we now have mutually recursive type synonyms.  I thought I could introduce @newtype@ wrappers and use an explicit @deriving@ to promote our class instances, but I haven't been able to come up with a variation that GHC will swallow.
* On #emacs: ++++
{{{
<conal> how in emacs do i get my buffers to be saved on quitting & restored on start-up?  i used to
do it many years ago, and i've forgotten.
<lawrence> ,desktop									      [14:49]
<fsbot> DeskTop is, like, [0] at http://www.emacswiki.org/cgi-bin/wiki.pl?DeskTop
<fsbot> [1] see http://www.emacswiki.org/cgi-bin/wiki/SessionManagement
<proqesi> conal: M-x desktop-save
<conal> proqesi: thanks :)
<jstanley> conal: and add a quit hook for that, etc, to automate it
<conal> jstanley: simple enough.  thx.							      [14:50]
}}}
===
* Working on a blog post "Functional reactive partner dancing" about the @GetPut@ and @PutGet@ types from yesterday.  I renamed them to @Follow@ and @Lead@.
* I've been following my nose for a while.  What am I aiming for? ++++
*  Get a robust & efficient basis for FRP for my various projects, including Phooey, TV, Eros, and piq.
*  Reconcile the type-flexibility of TV with the simplicity/robustness of Bot.
*  Inspiration for writing blog posts & conference papers.
===
* One of the puzzles I've had about arrow-based reactivity is how to distinguish between events and reactive values.  Perhaps the Lead/Follow dance suggests an answer.  Here are the definitions. ++++
\begin{code}
newtype Event b = Future (Reactive b)
data Reactive b = Stepper b (Event b)
\end{code}
===
An event starts out in waiting mode, until it becomes (catches up with) a reactive value.  A reactive
value, initially has a value and then acts like an event.  These two definitions are very like my @Follow@ and @Lead@.  They use futures where the arrow-based formulations have explicit inputs (of type @a@).
* How to get type-flexibility with an arrow interface?  Try some simple types for events and varying values with an implementation that maps them into bots.  The mapping exploits some isomorphisms -- currying and pair/either.
* I just realized that my @ChoosyBot@ representation might not be what I really want.  Here's the definition, from [[Invasion of the composable Mutant-Bots| http://conal.net/blog/posts/invasion-of-the-composable-mutant-bots/]]  ++++
\begin{code}
newtype ChoosyBot i o = Choosy (MutantBot i (First o))

newtype First a = First (Maybe a)  -- (in Data.Monoid)

type MutantBot = Automaton (->)
\end{code}
=== In that version, when there is no output for a given input, the bot mutates anyway.  Perhaps I'd like to allow mutation only when there really is an output.  That way perhaps there would be nothing to propagate or remember on non-responses.  I think change simplifies the code: ++++
\begin{code}
newtype ChoosyBot = Automaton (Kleisli First)
\end{code}
=== which is isomorphic to @ChoosyBot a b = a -> First (b, ChoosyBot a b)@.
* On the other hand, if I did a similar trick for chatter-bots (with @[]@ in place @First@), then I'd have a list of mutants.  I don't know what I'd do with them.
* Experiment: program a simple curried TV on top of Bot.  Then see how I might extract that behavior automatically from a TV spec.  Example: product of two numbers entered by sliders, showing the result textually.  I can make the bot statically and then dynamically attach it to the input & output widgets.  Since there are two @Int@ inputs, my bot has type @Either Int Int :-> Int@.  In addition to the change events, there will be initial input values and an initial computed product. ++++
\begin{code}
chatter :: (a -> (b, a :-> b)) -> a :-> b
chatter f = Chatter (Automaton ((pure *** unChatter) . f))

prod :: Int -> Int -> (Int, Either Int Int :-> Int)
prod a b = (a*b, chatter next)
where
next (Left  a') = prod a' b
next (Right b') = prod a b'
\end{code}
=== Refactoring a bit: ++++
\begin{code}
prod2 :: (Int,Int) -> (Int, Either Int Int :-> Int)
prod2 (a,b) = (a*b, chatter (prod2 . next))
where
next (Left  a') = (a',b)
next (Right b') = (a,b')
\end{code}
=== I think I could refactor more, abstract out the pattern of a reactive pair of values.  Then @fmap (uncurry (*))@ over the pair bot.  How might I write @prod@ more elegantly?
* How does @Either Int Int@ relate to @(Int,Int)@?  Idea: the former is an encoding of (a subset of) changes to the latter.  @Left a@ represents @first (const a)@, and @Right b@ represents @second (const b)@.
* Blog post [[Accumulation for functional reactive ChatterBots| http://conal.net/blog/posts/accumulation-for-functional-reactive-chatter-bots/]].
* Chat with Ivan T (svrog) about Reactive.
* Here's an elegant way to turn encodings of pair modifications into a stream of pairs: ++++
\begin{code}
updPair :: Either c d -> (c,d) -> (c,d)
updPair = (first.const) either (second.const)

-- updPair (Left  c') (_,d) = (c',d)
-- updPair (Right d') (c,_) = (c,d')

editPairL :: (c,d) -> Either c d :>- (c,d)

editPairF :: (c,d) -> Either c d :-> (c,d)
editPairF cd = updPair ^>> accumF cd
\end{code}
===
* Hm -- try to do something like  ++++
\begin{code}
a:>-Int -> a:>-Int -> a:>-Int
\end{code}
=== where @a@ is the environment from which the two input "events" are extracted.  I think then I can program in a @Reactive@ style.
* Tomorrow make a first software release (darcs only) with @Bot@ and @LeadFollow@, so that people can look at the code.
* Posted [[Functional reactive partner dancing| http://conal.net/blog/posts/functional-reactive-partner-dancing/]].
* Check out [[link| http://www.cs.mu.oz.au/~dmo/se-litreview/thesis.html]] from Christophe P (vincenz).  It's timing out now.
Here are my thoughts on using NVC in debate settings:
* Let go of any idea that anyone is right and anyone is wrong.  Utterly abandon your position.
* Release any desire to win or persuade.
* Focus your curiosity on the beautiful underlying motivations (needs) behind all of the expressed positions.
* Connect with gratitude for the person whose position you had opposed, since they helped to carry part of a shared creative tension that you didn't want to carry.
===
* Bots: ++++
*  Thinking about the idea from [[2008-02-10]]: ++++
\begin{code}
a:>-Int -> a:>-Int -> a:>-Int
\end{code}
=== Maybe I can program in my old (non-arrow) style but with these polymorphic types.  What does this shift in formulation give me?  I think it's a clear & implementable semantics, including deterministic merging.  With "chatty" bots, the common input stream gives a simple basis for merging.
*  Oh, oops!  I started thinking of leads (@a :>- b@) as like @Reactive b@, but the @Applicative@ instances don't correspond.  For leads, @f <*> x@ outputs only when *both* @f@ and @x@ output.
*  Idea: represent a varying value as an initial value plus a stream of future *edits* (functions).  The @Reactive@ representation is an value plus stream of future *values*, and it's tricky to do pairing.  I could even use a @const@ edit initially. ++++
\begin{code}
newtype a EditL b = EditL (a Lead   Endo b)
newtype a EditF b = EditF (a Follow Endo b)
\end{code}
=== I don't have to add anything for empty or multiple, since @Endo b@ is already a monoid under @id@ and @(.)@.  Here's an @Arrow@ instance.  I run into trouble with sequential composition.  ++++
\begin{code}
instance Arrow EditF where
arr h = EditF $ (Endo . const) <$> arr h
first (EditF fol) = EditF $
-- first fol :: (a,c) Follow (Endo bf, c)
first fol >>^ \ (Endo bf, c) -> Endo (bf *** const c)
EditF ab >>> EditF bc = EditF $
ab >>> arr (`appEndo` error "no initial value") >>> bc
\end{code}
*  Another possibility is to consume @Endo a@ instead of @a@, i.e., map edit streams to edit streams.  Then I'm ready by default to compute incrementally.  @first@ and @(>>>)@ are easy, but I don't see how to do @arr@.  Again, I'm missing an initial value.  What would it take for @arr@ to work out?
===
* Blog post: [[Applicative bots| http://conal.net/blog/posts/applicative-bots/]].
* Correspondence with Pierre-Evariste.  He's ported his "functional reactive overlay" stuff from OCaml to Haskell (at my suggestion) and incorporated an arrow style.
* Correspondence with Ross Paterson about arrow lib stuff.
* What next?  I have plenty of blog ideas.  How about getting my new bot stuff running, instead of just compiling?  Oh -- fitting it to TV.  Now I remember why I did the "Applicative bots" post.  Because the @Applicative@ instance for @((:>-) a)@ didn't do what I wanted.  Now it does.  Again, ++++
\begin{code}
add :: a:>-Int -> a:>-Int -> a:>-Int
\end{code}
=== How do I hook up @add@ to my GUI?  How to synthesize the arguments?  What's the ultimate source (@a@)?  In theory, it's buried way in the window system and already diced up by the time individual widget event handlers get access.  If I were designing a GUI toolkit from the bottom up, I'd make the source be the raw, global-coordinate events.  Building on another platform, I guess I have to assemble a new source from parts and then divvy up the result.  Wonk?!
* (Continuing...)  What's the point?  I think it's about being able to separate out events, mess with them (@fmap@ etc), and put them back together.  Filtering drops values but puts in blanks, so as to re-correlate later.  Similarly, merging can press simultaneous values into a single slot, again for correlation.  But the point really is correlation, isn't it?  Previously I've used a shared notion of //time// in the semantics (e.g., in Fran and Reactive).  I haven't quite seen how to implement time comparison when some of the times aren't yet known precisely.  In a sense, the bots use a unary/Peano representation of time ordinals, in which the blanks represent @Succ@.  Or maybe a trie, indexed by unary times.
* Another reason for explicit inputs is to allow modular transformation of a reactive system, transforming output and (inversely) input.
* If there's a single source of inputs, the notion of "time" can be simply an input ordinal.  Not really related to time -- just for ordering.
* On the other hand, any notion of time may be unnatural for reactivity.  Maybe I want something more like dependency or cause & effect, which is more like the Bot formulations.
* In the morning, try http://haskell.org/gtk2hs/win32/gtk+-dev-2.10.14-win32.tar.bz2 .  First, retry the .zip & .gz I already downloaded.  I wonder if a virus tweaked my machine.  I got a warning today.
* Right/wrong discussions on NVC-cert
* Tweaking Bot to work with Ross's new arrows lib.  He added some instances so I wouldn't have to host orphans.
* Try out [[Console| http://sourceforge.net/projects/console]]: ++++>
Console is a Windows console window enhancement. Console features include: multiple tabs, text editor-like text selection, different background types, alpha and color-key transparency, configurable font, different window styles.
=== Saw on [[Magnus Therning: Haskell on Windows| http://therning.org/magnus/archives/338]].
* Posted [[Pairs, sums, and reactivity| http://conal.net/blog/posts/pairs-sums-and-reactivity/]].
* I think I can generalize my bot accumulation combinators to any @ArrowCircuit@, which is a subclass of @ArrowLoop@ that adds @delay :: a -> a ~> a@.
* I think in Yampa inputs are tupled instead of summed.  There's no way to capture changes to part of the input, and so unnecessary recomputation happens.  A possible improvement is to take in a representation of change to part of the input.  One way to represent partial changes is via sums.  For instance, a change to @(a,b)@ could be a new first or second element, signified by @Left a@ or @Right b@.  However, @a@ and @b@ can be structured as well, so we have to nest sums.  A general notion of change is endomorphisms, but, being functions, endomorphisms are opaque.  I could use an explicit data type for representing endomorphisms.  For instance, ++++
\begin{code}
data Change a :: * -> * where
Konst  :: a -> Change a                       -- arr.const
Idy    :: Change a                            -- arr id
First  :: Change a -> Change (a,b)            -- first
Second :: Change b -> Change (a,b)            -- second
ThenCh :: Change a -> Change a -> Change a    -- (>>>)
\end{code}
=== These constructors all correspond to functions on arrows, so they can be used in a more general setting than endomorphisms.  Maybe add in @Result@ as well (requires deep arrows).  Instead of using @Left a@ to encode a changed first element, use @First (Konst a)@.  This @Change@ type is more flexible than sums.  I don't have to decide on change granularity up front.
* Another interpretation of @Change@ is as [[partial values| http://conal-elliott.blogspot.com/search/label/partial%20values]].
* So, how about a change/edit-based Bot type?  I don't know how to implement @fmap@ or @arr@.
* IRC channel #radeon topic "Support and development for open-source ATI driver - for fglrx use #ati".  cjb and quicksilver are on #haskell and #radeon.  Maybe I could get help there.
* Here's a simple way to implement reactivity: represent event as a lazy zip-list of maybes or lists, as in choosy-bots or chatter-bots.  Assume/require/arrange that all such lists are synchronized.  How?  Derive every event and reactive value as a map or scan/accum over the same base stream.  Using @ZipList@ means I get the @Applicative@ instance I want.  How do I make these streams?  Also, I'm not crazy about all the blanks.  The more events I have, the more blanks in all of them.  Keep noodling.
* See about implementing @Data.Future@ deterministically and correctly (consistent with my simple denotational semantics).  Define a "future time" abstract data type.  Here's the heart of deterministic temporal merging: ++++
\begin{code}
merge :: [(Time,a)] -> [(Time,a)] -> [(Time,a)]
[] `merge` qs = qs
ps `merge` [] = ps
ps@(p@(s,_):ps') `merge` qs@(q@(t,_):qs')
| s <= t     = p : (ps' `merge` qs )
| otherwise  = q : (ps  `merge` qs')
\end{code}
=== Hm.  Not quite.  We might not know whether we've run out of occurrences.
* Try the bot style again for GUIs.  Work out how to build up the composite (sum or Change/Edit) input type.  Use the Event pair/sum isomorphism.
* Oh -- maybe the whole sum/edit thing is just an optimization of a simpler Fruit/Yampa-like model, in which full input come in and full outputs go out on every step.  With the optimization, we get the full in & out initially and then just edits.
* As in Fudgets and Fruit, define a higher level arrow that hides user input and visual output from the types.  Use the simple bot model: ++++
\begin{code}
type (:->) = Automaton (->)
\end{code}
=== or ++++
\begin{code}
type (:->) = StreamMap
\end{code}
=== Then add the UI layer ++++
\begin{code}
newtype a :~> b = (User,a) :-> (Pic,b)
\end{code}
=== where @User@ is the whole state of the user input (mouse position, keyboard state, button state) and @Pic@ is the whole of the visual appearance.  Oh!  This is exactly Fruit's model.
* Hm.  Why didn't Fruit's GUI model catch on?  I posed that question on the yampa mailing list.
* I think the @Monad@ interface thwarts retained-mode presentation, but @Applicative@ and @Arrow@ don't.  In particular, GUI libs are retained mode, in that widgets are stateful and reused.  Another advantage of @Applicative@ and @Arrow@ over @Monad@ is that duplication is explicit, which may help for destructive optimization.
* [[New Features for Perpubplat and Ruminations on Service APIs for the Web| http://reddit.com/goto?rss=true&id=t3_69738]]
* I re-read "[[Genuinely functional user interfaces| http://www.apocalypse.org/pub/u/antony/work/pubs/genuinely-functional-guis.pdf]]" last night.  I like the simple & clear functional model.  Could one use a mainstream GUI lib underneath and //still// have clear GUI semantics?  Can one have TV-like composability?  I'd like to use Fruit as a starting point for 3D GUIs.
* I suspect that my Reactive stuff (or something like it) could benefit dynamic web content generation.  Example: I installed wp-latex recently, so I could have latex-formatted bits on my blog.  I also tweaked the PHP code a bit.  Similarly, I have markdown rendering, also tweaked a bit.  Now I wonder: when does all this stuff execute?  What if I tweak (a) post content, (b) markdown plugin code, (c) wp-latex plugin code, (d) markdown or wp-latex plugin settings?  Does it all happen on every page download?  I think wp-latex does some caching.  I guess it'd have to have smarts about when the cache becomes invalid (when some or all plugin settings change).
* Consider a yampa-like (signal transformer) model but optimized so that input & output values are represented differentially.  (Be sure to re-read "[[Dynamic Optimization for Functional Reactive Programming using Generalized Algebraic Data Types| http://www.cs.nott.ac.uk/~nhn/Publications/icfp2005.pdf]]".)  Derive an efficient push-based implementation that works with edits, i.e., some limited incremental/adaptive computation.  Perhaps represent a flow (reactive value) as a value and a stream of timed edits.  Sometimes (often) we'll have to compute actual values, accumulated from edits.  Probably worth caching full values right into the stream, for sharing, and rely on laziness to allow them to go uncomputed when unused.  Instead of stream functions, I could use an automaton, to regulate the back-and-forth, correlating outputs & inputs.  Plain old stream functions or automata would suffice, rather than choosy or chatty bots, since edits can be empty (identity) or compositions.
* What kind of incremental computation can I do?  The signature of @fmap@ doesn't leave much room for optimization.  I can do @Idy@ (no change), but that's it.  What specialized optimizable patterns are there?  How about @first f@ applied @second g p@? ++++
\begin{code}
first f . second g == second g . first f
== f *** g
== \ (a,b) -> (f a, g b)
\end{code}
===
The goal is to get the full functionality of a general purpose, programmer-friendly markup language like markdown.  One example is image embedding.  Another is friendly links (no visible url).  The idea is to make a future haddock be a *preprocessor* that generates [[pandoc| http://johnmacfarlane.net/pandoc/]]'s extended markdown (or some such).  Documentation would be mostly markdown, with very few extensions for code documentation ('foo' and "Foo.Bar", maybe a bit more).  Most of the doc would simply be passed through untouched.  The code-doc extensions would get rewritten into standard markdown and mixed in with the rest.

Pandoc could then take the generated markdown and produce HTML, LaTeX, DocBoook XML, etc.

Perhaps there will be ways in which markdown falls short in expressiveness.  If so, I'm guessing the shortcomings wouldn't be specific to the task of code documentation, and so could be approached as improvements to markdown/pandoc (which is written in Haskell).

Since the old and new doc languages would be quite incompatible, we might want to specify in a .cabal file which language to use.
* Correspondence: ++++
*  Emergence
===
* Correspondence on haddock/markdown, Fruit, and Yampa.
* Ivan T pointed me to [[andLinux| http://www.andlinux.org]], which is "a complete Ubuntu Linux system running seamlessly in Windows".  Also to [[Johnny Chung Lee| http://www.cs.cmu.edu/~johnny]], who has a bunch of neat stuff, including wii remote projects.
* Playing again with applying Warren Burton's "improving values" idea to event occurrence times. ++++
*  Here's a first shot: ++++
\begin{code}
-- | Progressive information about a value (e.g., a time)
data Improving t = AtLeast t (Improving t) | Exactly t

instance Ord t => Ord (Improving t) where
Exactly s `compare` Exactly t = s `compare` t
AtLeast s u' `compare` v@(Exactly t) =
if s > t then GT else u' `compare` v
u@(Exactly s) `compare` AtLeast t v' =
if s < t then LT else u  `compare` v'
u@(AtLeast s u') `compare` v@(AtLeast t v') =
-- move forward where we know less
if s <= t then
u' `compare` v
else
u  `compare` v'
\end{code}
=== Similarly for @min@.  How would I construct these improving values?  One possibility is from a list of occurrence/non-occurrence times (say @[(Bool,t)]@).  That list could come from a channel fed regularly with non-occurrences.  But then I'd be back to doing lots of busy work when nothing is going on.  I don't know where else to go with this idea as is.
*  Here's a variation on the previous idea.  Define a //non-recursive// type of partial info about ascending values: ++++
\begin{code}
data PartialT t = AtLeast t | Exactly t
\end{code}
=== Now, represent our times via a ref that holds a @PartialT@ and a way to update it.  Wrap @unsafePerformIO@ around its uses.  To ensure referential transparency, make sure that extractable information content is always monotonically increasing.  How?  The successive ref values increase in information, and queries use information from the ref values, fetching more information as needed.  In a GUI setting with a main "event loop", I can have the updater action ask the time of the last event  occurrence.  One more thing: when comparing times, I might have an @Exactly s@ for a //future// @s@, and an @AtLeast t@ for a past or present @t@.  (Example: the first event is a timer.)  In that case, I want to simply block until the comparison can be made, i.e., until time @s@ is passed or the second time somehow becomes known.
*  Oh -- here's a more systematic way to think about the problem.  I want to know whether @s <= t@.  There are a few different ways to answer the question: ++++
*   Compute @s@ and @t@ exactly and compare.
*   Compute @s@ exactly and prove that @t >= s@ (without having to know @t@ exactly).
*   Compute @t@ exactly and prove that @s > t@.
=== To determine @s <= t@, combine these three methods.  Spin off one thread per method and go with whichever answer comes back first.  Since all successfully terminating methods are guaranteed to agree, I don't care about determinism.  I suspect Warren Burton's "improving values" papers are about this idea.  I've found one but not the other.  The first method is applicative, i.e., it combines two independent inquiries into the future (@s@ and @t@).  The second and third are monadic, in that the second half of the inquiry depends on information from the first half.
*  How do I prove that @t >= s@ for a known @s@ and an unknown event occurrence time @t@?  I find the source of @t@ and ask it to wait until all occurrences by time @s@ are known and then check that @t@ is still unknown.  If @t@ is associated with a central event loop, then see if @s@ has already happened for that source (the usual case, I expect).  If not then sleep until the event loop gets to @s@.  After the possible sleep.
* I could represent these inquiries as @IO a@ or @IO (Maybe a)@ (cheap and deterministic, e.g., reading an @MVar@).  Represent failure as nontermination or @return Nothing@, respectively.
* Idea: have the @Future@  monoid use this parallel-or operator, in which successfully terminating arguments must agree.  My current @Future@ @mappend@ does not expect them to agree.
* Where does this waitable "source" come from?  Suppose I have the @mappend@ of two events (not futures) with different event sources?  I guess each future has this sleep ability.  The "earlier" operator combines the sleepers.
===
* Ordered books on chaos and fuzzy logic for Luke, via abebooks.com.
* Check out [[zedenem's citeulike| http://www.citeulike.org/user/zednenem]]
* Suppose I define and use a "Tactic" monad with future times. ++++
\begin{code}
exactT :: FTime -> Tactic Time
(*<=), (*<), (*>=), (*>) :: FTime -> Time -> Tactic Bool
(<=*), (<*), (>=*), (>*) :: Time -> FTime -> Tactic Bool
alt :: Tactic a -> Tactic a -> Tactic a
\end{code}
=== To decide whether @sf <= tf@ for @sf, tf :: FTime@, try three queries in parallel.  The first tactic evaluates both times and compares.  Each of the other two determines one value exactly and then tries to prove how the other relates. ++++
\begin{code}
liftA2 (<=) sf tf
alt (exactT sf >>= (<=* tf))
alt (exactT tf >>= (sf *<=))
\end{code}
=== How to implement @sf *<= t@?  Ask @sf@'s source to wait until @t@ and then verify that @sf@ is not known.  If @sf@ is known, the tactic fails.  In that case the first tactic can succeed.
* Perhaps @Tactic@ is a wrapper around @STM@ that caches computation results.
* Here's another angle on change-based FRP: ++++
*  Follow the signal-function model.  The input type is a nested tuple, combining various external sources.
*  For each input source, construct a state update function.  Justify semantically & algebraically.
*  Extra credit: implement state update destructively.  Warning: tricky (overwrites).
*  What is the state?  More than just the ultimate output.  First approximation: the values of all of the pieces of the computation.
===
* Type of "parts" (e.g., of an input): ++++
\begin{code}
data Part :: * -> * -> * where
IdP     :: Part a a
FirstP  :: Part c a -> Part (c,b) a
SecondP :: Part c b -> Part (a,c) b
\end{code}
=== This type is like an explicit representation of (some of) composable references.  Here are the realizations of sub-value extraction and editing: ++++
\begin{code}
extract :: Part c a -> (c -> a)
extract IdP         = id
extract (FirstP  p) = extract p . fst
extract (SecondP p) = extract p . snd

change :: Part c a -> ((a -> a) -> (c -> c))
change IdP         = id
change (FirstP  p) = first  . change p
change (SecondP p) = second . change p
\end{code}
=== and composition ++++
\begin{code}
comp :: Part c b -> (Part b a -> Part c a)
comp IdP         = id
comp (FirstP  p) = FirstP  . comp p
comp (SecondP p) = SecondP . comp p
\end{code}
=== with specification ++++
\begin{code}
extract (comp p q) == extract q . extract p
change  (comp p q) == change  p . change  q
\end{code}
=== I really only wanted constant @a@ values for @change@, but I like how the definition mirrors @comp@'s in this slightly more general setting. Also, @extract@ and @change@ relate @Part@ to composable references.  Specializing: ++++
\begin{code}
subst :: Part c a -> (a -> (c -> c))
subst p = change p . const
\end{code}
=== Because of the way I've defined @change@ and @subst@, the "syntactic" analysis of parts is done once rather than at each substitution.  The extra parentheses in the type signature serve as a reminder.
* Parts separate out the //shape// of an edit from the particulars.  Every input source has a corresponding //part// of the input.  We can apply @subst@ to each of those parts to get an input-changing event, merge (@mappend@) the results all together, and @accumE@ the result with the initial value.  Instead, I want to turn each part into function that updates state, using the static description of the part and the bot.
* Playing with this example: @\ ((a,b),c) -> a*b + b*c@.  I can break down the example into arrow form, but applicative may be more convenient: ++++
\begin{code}
liftA2 (+)
(liftA2 (*) (fst>>>fst) (fst>>>snd))
(liftA2 (*) (fst>>>snd) snd)
\end{code}
===
* Replied to Ian P about Emergence.
* FRP: ++++
*  Given a bot and an input "part", generate a function that turns a corresponding input value into a state transformer.
*  What's the state?  It must contain the output value, but also intermediate values used in incrementally recomputing the output.
*  Pattern state structure after @Applicative@. ++++
\begin{code}
-- | Applicative state.  @AState c a@ is state of type @a@ for input of
-- type @c@.
data AState :: * -> * -> * where
PureS :: a -> AState c a
AppS  :: AState c (a -> b) -> AState c a -> AState c b
PartS :: Part c a -> AState c a
\end{code}
===
*  Updating then might have type ++++
\begin{code}
updater :: Part i a -> (a -> AState i o -> AState i o)
\end{code}
===
* Started writing post "Change-driven functions".
===
* Updated [[NVC Evolves| http://evolve.awakeningcompassion.com/]] blog to WordPress 2.3.3.
* In writing "Change-driven functions", I realized that I'll also want some kind of "let" or "dup" or something, to allow more than one use of a computed value.  And then I expect this stuff to be difficult/ugly to use.  Hm.  Discouraged.
* [[From Power Up To Bash Prompt| http://reddit.com/goto?rss=true&id=t3_6a2gz]], from November 2000.  "This is a brief description of what happens in a Linux system, from the time that you turn on the power, to the time that you log in and get a bash prompt. Understanding this will be helpful when you need to solve problems or configure your system."
* From [[Why don’t we have a word for it?| http://reddit.com/goto?rss=true&id=t3_6a1nf]]  "yosefk’s Semantical Decay Theorem: all useful terms which are not completely neutral become meaningless."
* Installed Webroot //AntiVirus and AntiSpyware//, with two-year license.
* Note to yampa-users about change-driven evaluation: ++++>
My sense at this point is that the obstacle to efficient change-driven evaluation is partly the arrow interface but more the current translation from arrow notation to combinator form.  Specifically, that translation uses the arr method quite a lot, which prevents analysis.  For instance, in Neil's example below, all of the arr-wrapped functions are simply manipulations of nested pairs, but those manipulations are hidden in a function.

Suppose we define a superclass of Arrow with all of Arrow's methods except for arr and with some methods that can now be expressed in terms of arr, including fstA and sndA.  Make sure we have everything necessary to define the default methods for all of the current Arrow class's defaults (except for the arr/pure default cycle).  Now tweak the translator for arrow notation so that it targets the new superclass when possible.  In "p <- a -< exp", if the expression exp is built up from variables and pairing, no arr would be generated.  Neil's example is one such case. For change-driven evaluation, the implementation of fstA & sndA would know to ignore changes to only the snd or first half of a pair (and similarly with compositions).  For other expressions, the translation would still use fstA & sndA and minimize information flowing into arr's, and the arrow implementation could then exploit the static knowledge of what information does not get into the arr's.
===
* Think about data-driven FRP in terms of of static analysis and either code generation or very efficient interpretation.  Use the @Applicative@ interface.
* Although I don't know why my Reactive library sometimes crashes, I want to go ahead with writing a paper on it.  Perhaps in the process of writing, I'll get some new insights and fix the problem.  This approach worked for me with Eros.  I didn't know how to do input extractors when I started the paper.  So, what's in the paper: ++++
*  Reactive normal form.
*  Composing reactivity and functions to get reactive behaviors.
*  Simple, hybrid push & pull implementation.
*  Simple, formal semantics.
*  More extensive use of type classes
*  Proving correctness of the implementation with respect to the semantics, perhaps via the worker-wrapper transformation.
*  Functional futures.
*  Compile-time rewriting to remove some threads & M-vars.
===
* [[A Pro-Thought Manifesto| http://reddit.com/goto?rss=true&id=t3_6a5cl]].  I'd like to write a follow-up blog post inviting and inspiring people to go beyond manifestos and moral positions (like the idea of a "moral duty" and the existence of "good" or "bad" ways to be).  Some nice bits on [[the author's home page| http://www.chiark.greenend.org.uk/~sgtatham/]].
* I just remembered this idea for Reactive, described on [[2008-02-23]] and  [[2008-02-24]]: restrict @mappend@ on futures so that it requires its arguments to agree when they succeed.  Then the operational indeterminacy is compatible with semantic determinacy.  This insight is a direct application of Warren Burton's idea of "indeterminate behavior with determinate semantics in parallel programs", and in particular his notion of "improving values".
* Wrote to Warren Burton.
* Also this one: somehow work the data dependencies to preserve order in Reactive (mumble).
* Easy way to (a) back up a wordpress blog and (b) make a new copy (e.g., for fearlessly upgrading): ++++
*  Export from old, yielding an xml file.  Save locally.
*  Note widget set-up.  Mine: ++++
*   Calendar
*   Recent Posts
*   Archives
*   Categories 1 (will change to CTC)
*   Text 1, with {{{<b><a href="?p=*">Browse all posts</a></b>}}}
*   Meta
===
*  Note Blogroll.  Mine: ++++
*   [[Emergence of NVC| http://emergence.awakeningcompassion.com/]]
===
* Note Blog sub-title.  Mine: "Explorations in evolving the understanding, living, and teaching of Nonviolent Communication".
*  Create new, empty wordpress blog.
*  Set up widgets, blogroll, and blog sub-title the way I had them before.
*  Import posts from the local .xml file.  The import process will offer to transfer authorship to existing users or to create new users with the given names.  If created, tweak user permissions.
*  Activate plugins and enter Akismet API Key
*  And a few other fiddly bits, like plugin settings.
*  Okay, maybe not such an easy way.
===
* Got permalinks working for [[NVC Evolves| http://evolve.awakeningcompassion.com/]].
* Oops -- I just discovered that my emergence-of-nvc mail was bouncing.  It's fixed now.  I had thought there was no activity recently.  Banned some spammers, deleted some off-topic mail.  There was a note from [[Emma| http://emmamccreary.com]] asking about project activity.
* Do another version of @Future@ with functional improving values.  From there look at an efficient,
referentially transparent, side-effecting version.  It was trivially easy to plug improving times into
my event library, since I'd already parameterized over the time type, and I only require @Ord@.
* Here's an idea for improved merging with improving times: instead of asking which of two leading values is smaller, perform a "pair sort", yielding a pair.  The pair sort allows more work to be retained.  Here's what I mean.  First, the comparison-based merge, showing only the non-empty/non-empty case: ++++
\begin{code}
us@(u:us') merge vs@(v:vs') =
if u <= v then
u : (us' merge vs )
else
v : (us  merge vs')
\end{code}
=== Now the pair-sort-based merge: ++++
\begin{code}
(u:us') merge (v:vs') = a : insert b (us' merge vs')
where
(a,b) = sortPair (u,v)
\end{code}
=== Hm.  That insert bit may be awkward, and it will lead to some unnecessary comparisons.  Another angle is a comparison operation that yields a boolean and improved values of @u@ and @v@. ++++
\begin{code}
(u:us') merge (v:vs') =
if uLeq then
u' : (us' merge (v':vs'))
else
v' : ((u':us') merge vs')
where
(uLeq,u',v') = u leq v
\end{code}
=== On the other hand, if I represent improving times efficiently, updating the approximations in place, the simpler definition will work fine.
* Who provides these lower bounds?  Consider a GUI setting.  Each occurrence has an improving time and a value ...
* Idea: instead of parameterizing over a time type, parameterize over a future type constructor.  One candidate is @(,) (Time t)@ for @t@ in @Ord@, e.g., @Improving Double@.  Another candidate would have the value nested inside of improving times, like @AtLeast t0 (AtLeast t1 (Exactly t a))@.
* I'm bouncing around among various ideas.  Pick one and focus on it.  My favorite is the "tactics" idea from [[2008-02-24]].  Use concurrency to try multiple tactics for answering a single query.  The tactics must agree where they succeed, so that the behavioral nondeterminism doesn't compromise the semantic determinism.  Specifics: ++++
*  Represent a future value via an MVar that holds a best-yet-known approximation.  These mvars can change, but only by increasing information content, to preserve referential transparency.
*  I could have a process that eagerly and frequently improves approximations, but doing so would burn cycles.  What to do instead?  Wait until comparisons are demanded.
*  To resolve whether @s <= t@, try two different tactics.  One computes @s@ exactly and compares the exact result to the possibly unknown @t@.  The other tactic does the reverse.  (As mentioned on [[2008-02-24]], there's a three-tactic alternative.)  Each tactic can have pure functional interface and semantics if I handle failure with nontermination.  I expect that using pure interfaces will help with simple result sharing, as usual.
*  Operations on future values or times: ++++
\begin{code}
exact :: Improving a -> a
(*<=), (*<), (*>=), (*>) :: Ord a => Improving a -> a -> Bool
(<=*), (<*), (>=*), (>*) :: Ord a => a -> Improving a -> Bool
\end{code}
=== I've eliminated the explicit @Tactic@ notion here, in favor of pure values.  To try different "tactics", use an @unamb@ function, which has the precondition that its arguments must agree when non-bottom, and is thus semantically unambiguous.  (Looking for another name.) ++++
\begin{code}
unamb :: a -> a -> a
\end{code}
=== Then to test @si <= ti@ for IVs (improving values) @si@ and @ti@: ++++
\begin{code}
(exact si <=* ti) unamb (si *<= exact ti)
\end{code}
===
*  How to represent IVs and implement these @exact@ and comparison primitives?
===
* Look up Don S's "Holy Shmoly" posts and read about concurrency mechanisms.
* Learning about feedburner.  Got set up with nvc-evolves (http://feeds.feedburner.com/nvc-evolves and .../comments-for-nvc-evolves).  To do ++++
===
* 95249 is latitude 38.192823, longitude -120.642852
* my hscolour'ing is broken.  i think it's not finding the css
* Got help from Cale on monad composition: +++
{{{
<Cale> Cool :)
<Cale> http://en.wikipedia.org/wiki/Distributive_law_between_monads -- this lists the axioms
required, but in CT notation.
<Cale> so, translating...
<Cale> join . fmap distriM . distribM = distribM . fmap join
<conal> Cale: so l == distribM
<Cale> yeah
<Cale> fmap join . fmap distribM . distribM == distribM . join
<Cale> then...
<Cale> distribM . fmap return == return
<Cale> and
<Cale> distribM . return == fmap return
<Cale> That'll be it :)
<conal> Cale: thx
<Cale> conal: It might be interesting to know if these translate in any nice way into a set of
conditions on bind.
<conal> Cale: sure.  i'm going to walk through your translations and the diagrams and see if get the
process.
<Cale> I'm not sure, but if there was anything written about it, I'm pretty sure I've seen a paper
which was more CS-oriented regarding these distributive laws.			      [12:26]
<Cale> I'll see if I can find it.
<conal> Cale: that'd be cool.  and if not, i can work with the diagrams.
<conal> Cale: do you know if anyone has explored replacing the monad transformer libraries with a
type-composition approach?  it sure seems more modular.				      [12:28]
<Cale> conal: Well, there was a short article about it... Monad transformers are more general --
some of them arise from pre or post compositions, but some, like State, are something else
altogether.
<Cale> conal: It was on Reddit a while back.
<conal> Cale: glad to know.  if you run across the article, please let me know.
<Cale> okay, I'll have a look
<Cale> http://sneezy.cs.nott.ac.uk/fplunch/weblog/?p=84 -- ah, here it was
<lambdabot> Title: FP Lunch » Blog Archive » Monad Transformers
<Cale> I'm not altogether certain about its claim for Cont, but the rest seems okay to me.
<conal> Cale: super.  thanks!  :)
<Cale> conal: I'll keep looking for that paper I was thinking of.
<Cale> CiteSeer is annoyingly down.
<conal> Cale: one thing i'd like to do is go back and simplify GUIs in Phooey.  i had a very lovely
formulation via type composition, which i used for Functor & Applicative, but i didn't think
i could use it for Monad.  i'll try again.
<Cale> conal: That would be interesting. The really nice thing about Applicative of course is that
you don't have to add any extra information to say how things compose, but perhaps the
appropriate distributive laws are available to make what you have into a monad :)      [12:37]
<conal> Cale: exactly!  i hadn't thought of that possibility until recently.		      [12:38]
pdx.edu/~mpj/pubs/composing.html -- ah, I think this might be it.
<Cale> It's a little hard to find, given that it doesn't appear to refer to the distributive laws as
such.
<conal> Cale: ooh -- i'd forgotten about that paper.  looks very applicable.  thanks again.   [12:40]
}}}
===
* Title idea: "Simply classical FRP"
* Reconstructed quite a lot of Reactive on top of the simple semantic @Future@, using @Event a = [Future (Maybe a)]@, though more nicely structured as a type composition.  Still to do: a way to create low-level events, something like @mkEvent :: IO (Event a, Sink a)@.  Or maybe switch to another @Future@ implementation, so that @Event a = [Future a]@ is practical or back to @Event a = Future (Reactive a)@. ++++
\begin{code}
data Future a = AtLeastF (Time t) (Future a) | ExactlyF (Time t) a
\end{code}
=== or isolate the "improving" bit to time: ++++
\begin{code}
data T = AtLeastF (Time t) T | Exactly (Time t)
\end{code}
=== or, refactoring, ++++
\begin{code}
data T = T (Time t) (Maybe T)
\end{code}
===
* Do another version of @Future@ with functional improving values.  From there look at an efficient, referentially transparent, side-effecting version.  It was trivially easy to plug improving times into my event library, since I'd already parameterized over the time type, and I only require @Ord@.
* Here's an idea for improved merging with improving times: instead of asking which of two leading values is smaller, perform a "pair sort", yielding a pair.  The pair sort allows more work to be retained.  Here's what I mean.  First, the comparison-based merge, showing only the non-empty/non-empty case: ++++
\begin{code}
us@(u:us') merge vs@(v:vs') =
if u <= v then
u : (us' merge vs )
else
v : (us  merge vs')
\end{code}
=== Now the pair-sort-based merge: ++++
\begin{code}
(u:us') merge (v:vs') = a : insert b (us' merge vs')
where
(a,b) = sortPair (u,v)
\end{code}
=== Hm.  That insert bit may be awkward, and it will lead to some unnecessary comparisons.  Another angle is a comparison operation that yields a boolean and improved values of @u@ and @v@. ++++
\begin{code}
(u:us') merge (v:vs') =
if uLeq then
u' : (us' merge (v':vs'))
else
v' : ((u':us') merge vs')
where
(uLeq,u',v') = u leq v
\end{code}
=== On the other hand, if I represent improving times efficiently, updating the approximations in place, the simpler definition will work fine.
* Who provides these lower bounds?  Consider a GUI setting.  Each occurrence has an improving time and a value ...
* Idea: instead of parameterizing over a time type, parameterize over a future type constructor.  One candidate is @(,) (Time t)@ for @t@ in @Ord@, e.g., @Improving Double@.  Another candidate would have the value nested inside of improving times, like @AtLeast t0 (AtLeast t1 (Exactly t a))@.
* I'm bouncing around among various ideas.  Pick one and focus on it.  My favorite is the "tactics" idea from [[2008-02-24]].  Use concurrency to try multiple tactics for answering a single query.  The tactics must agree where they succeed, so that the behavioral nondeterminism doesn't compromise the semantic determinism.  Specifics: ++++
*  Represent a future value via an MVar that holds a best-yet-known approximation.  These mvars can change, but only by increasing information content, to preserve referential transparency.
*  I could have a process that eagerly and frequently improves approximations, but doing so would burn cycles.  What to do instead?  Wait until comparisons are demanded.
*  To resolve whether @s <= t@, try two different tactics.  One computes @s@ exactly and compares the exact result to the possibly unknown @t@.  The other tactic does the reverse.  (As mentioned on [[2008-02-24]], there's a three-tactic alternative.)  Each tactic can have pure functional interface and semantics if I handle failure with nontermination.  I expect that using pure interfaces will help with simple result sharing, as usual.
*  Operations on future values or times: ++++
\begin{code}
exact :: Improving a -> a
(*<=), (*<), (*>=), (*>) :: Ord a => Improving a -> a -> Bool
(<=*), (<*), (>=*), (>*) :: Ord a => a -> Improving a -> Bool
\end{code}
=== I've eliminated the explicit @Tactic@ notion here, in favor of pure values.  To try different "tactics", use an @unamb@ function, which has the precondition that its arguments must agree when non-bottom, and is thus semantically unambiguous.  (Looking for another name.) ++++
\begin{code}
unamb :: a -> a -> a
\end{code}
=== Then to test @si <= ti@ for IVs (improving values) @si@ and @ti@: ++++
\begin{code}
(exact si <=* ti) unamb (si *<= exact ti)
\end{code}
===
*  How to represent IVs and implement these @exact@ and comparison primitives?
===
* Look up Don S's "Holy Shmoly" posts and read about concurrency mechanisms.
* Chat with Christophe P (vincenz) about how to help a troll.  Saved.
* Chat on #oasis (vincenz et al) about what to call @unamb@ and what it means.
* There's a problem with the approach I've been pursuing with @(<=*)@ etc.  The result is a @Bool@, which means it cannot give me any partial information.  In contrast, the result of @min@ is @Improving t@, which can start producing partial information (lower bounds) right away.  On the other hand, I don't know how to use @min@ and not @(<=)@ simply and elegantly, to do merging.
* (Continuing.)  //Idea:// return to my previous formulation of an event as a single future reactive value.  Here's merging: ++++
\begin{code}
instance Monoid (Event a) where
mempty  = Event mempty
mappend = inEvent2 merge

merge :: Future (Reactive a) -> Future (Reactive a) -> Future (Reactive a)
u merge v     =
(onFut (merge v) <$> u) min (onFut (u merge) <$> v)
where
onFut f (a Stepper Event t') = a stepper Event (f t')
\end{code}
=== (There's also a @Never@ event constructor, for optimization.)  The @min@ here is on futures.  Semantically, ++++
\begin{code}
Future (s,_) <= Future (t,_) = s <= t

fa min fb = if s <= t then fa else fb
\end{code}
=== The @min@ definition here is a generic default for ordered types.  The drawback is that it cannot provide any partial information until @s <= t@ can be determined.  The following alternative can provide as much partial time information as the time type allows. ++++
\begin{code}
Future (s,a) min Future (t,b) =
Future (s min t, if s <= t then a else b)
\end{code}
=== A payoff for this change from @(<=)@ to @min@ comes when considering merging three events.  If we compare two initial occurrences using @(<=)@, we won't be able to produce any info that could show that the third initial occurrence comes first.
* This @min@ definition may have some redundant computation, considering how related @min@ and @(<=)@ are.  If improving values are implemented functionally as lists or nested bound structures, then @s min t@ is making progress through both @s@ and @t@, but then @s <= t@ starts over.  The result is a space-time leak.  More helpful would be an operation that both computed the minimum and said which one it was. ++++
\begin{code}
minI :: Ord a => Improving a -> Improving a -> (Improving a,Bool)
\end{code}
=== Then ++++
\begin{code}
Future (s,a) min Future (t,b) = Future (u, if c then a else b)
where
(u,c) = s minI t
\end{code}
=== Unfortunately, this definition breaks temporal polymorphism.
* Conclusion: define events as future reactive values, define @min@ on futures as above (not the default in terms of @(<=)@).  Use futures (and hence events and reactive values) over improving times.  Then all I have to implement carefully is @min@ on improving values (IVs).  If I want temporal polymorphism, I'd better implement IVs imperatively (which has additional performance benefits).
* Warren Burton sent me pointers to some of his papers.
* Remember: when using "{{{wget --recursive}}}", be sure to add "{{{--no-parent}}}".  Otherwise, I'll get the //whole domain//!
* Filled out the Anygma job inquiry form Peter V mailed. +++
any or all of: architect, visionary, lead dev, co-dev, problem-solver, consultant, mentor, teacher, mediator.
===
I've been exploring this reactive multi-media & graphics deeply since 1990 and have designed, implemented and used several similar systems.  With this experience and insight, I'm in a unique position to see and communicate the ramifications and importance of many, many design choices the project will encounter along the way.

About salary and other terms of employment: I'm interested in a thoroughly collaborative role in Anygma, from beginning to end and at all levels and functions, so that our interests are aligned.  I've had enough involvement with organizations and projects whose members are at cross purposes, and I'm ready to shift gears.  Face-to-face discussions would probably be most effective start.  I'm available to visit after the April 2 ICFP paper deadline.

Warm regards,  - Conal
===
===
* Efficient improving values (IVs): ++++
*  The two necessary operations seem to be (a) extracting an exact value, and (b) comparing with a given exact value.  So, represent an IV //as// that pair of operations. ++++
\begin{code}
data Improving a = IV a (a -> Ordering)
\end{code}
=== The IV for a known value @x@ is @IV x (compare x)@.  How do I implement @minI@ from [[2008-03-04| yesterday]]? ++++
\begin{code}
minI :: Ord a => Improving a -> Improving a -> (Improving a,Bool)
IV u uComp minI IV v vComp = (IV uMinV wComp, uLeqV)
where
uMinV = if uLeqV then u else v
-- u <= v: Try @v compare u /= LT@ and @u compare v /= GT@.
uLeqV = (vComp u /= LT) unamb (uComp v /= GT)
-- (u min v) compare t: Try comparing according to whether u <= v,
-- or go with either answer if they agree, e.g., if both say GT.
minComp = if uLeqV then uComp else vComp
wComp t = minComp t unamb
assuming (uCompT == vCompT) uCompT
where
uCompT = uComp t
vCompT = vComp t

-- Yield a value if a condition is true.  Otherwise wait forever.
assuming :: Bool -> a -> a
assuming c a = if c then a else hang
\end{code}
===
*  What's left?  How do I make an improving value for the times of primitive events?
===
* How does my @Improving@ type relate to Warren B's?  Consider other names.
* Next for Reactive: how to package up external events (e.g., in a GUI) via @Improving@?  How to represent occurrences of these external events?  The key bit is the comparison part (the @a -> Ordering@ field).  We'll want some way to to query the event loop (generating monotonic times) and find out its current time.  If the current time is past the query time and the occurrence hasn't arrived, then the answer is @GT@.  Otherwise, ask to be notified when that time passes.  Take care with thread-safety and race conditions.  Idea: the query and notification works by immediately returning a pure boolean value, which is made from an IVar.  The IVar gets written by the event loop either immediately or in the future.  The writing happens in the event loop's thread and does the time comparison.  mumble.
* Hm: maybe I don't really ever have to sleep.  Here's my thinking: suppose I choose for the type parameter of @Improving@ an artificial notion of time, namely an incrementing counter with an opaquifying @newtype@ wrapper.  Then I can never have such a time in hand unless it's already arrived.  The @newtype@ wrapper prevents times from being created in advance.  Drawbacks: ++++
*  I can't have predictable occurrence times, e.g., for synthetic events (e.g., timers).  I'd have to simulate them, which would interfere with possible optimizations.
*  I don't know how to integrate different events based on different external sources, e.g., GUI, file system, and networking.
===
* To simplify conversion from my current Reactive implementation, try to match the interface, especially @mkEvent :: IO (Event a, Sink a)@.
* Reactive: ++++
*  Idea: augment my @Improving@ implementation with a best-so-far approximation.  Hide it inside the comparison closure.
*  Try out one of the simple, sequential, functional implementations of Reactive.  Get it working, and use it in my other projects.  It would pump a lot of non-occurrences through but wouldn't re-compute and re-render unnecessarily, so it's still an improvement over other FRP implementations.  Compose in time functions and stop and start threads (or feed and starve channels) for constant and non-constant time functions.
*  Here's one way to create events.  Make a channel of time/@Maybe@ pairs.  Start a thread that pumps @Nothing@ values into it.  For each actual occurrence, also add in a @Just@ value.  What about times?  I have to make sure the times are monotonic.  Oh, don't use a thread.  Use a GUI timer call-back instead, to exploit the single-threaded nature of the main event loop.  Convert the channel into a list with [[getChanContents| http://www.haskell.org/ghc/docs/latest/html/libraries/base/Control-Concurrent-Chan.html#v%3AgetChanContents]].  Given the list, I can map to different event representations, where @DoubleI = Improving Double@. ++++
*   @Event a = [FutureG Double (Maybe a)]@
*   @Event a = [FutureG DoubleI a]@
*   @Event a = FutureG DoubleI (Reactive a)@
=== I already have all three of these implementations.  Given my list of time/@Maybe@ pairs, conversion to the first representation choice is trivial.  For the other two, ++++
\begin{code}
-- | Interpret 'Nothing' values as lower bounds
improveMbs :: [(t, Maybe a)] -> [(Improving t, a)]
improveMbs = foldr f []
where
f (t,Just a ) qs = (Imp [t],a) : qs
f (t,Nothing) ((Imp ts', a) : qs') = (Imp (t:ts'), a) : qs'
f (_,Nothing) [] = error "improveMbs: input ends in a Nothing"

-- | Convert a temporally monotonic list of futures to an event
listToEvent :: Ord t => [FutureG t a] -> EventG t a
listToEvent = foldr (\ fut e -> Event ((stepper e) <$> fut)) mempty \end{code} === === * Finally found the bug that made event occurrences have all the same time. It was in @withTimeE@. * Warning! In the @Ord@ instance for @AddBounds@ (in Future), @min@ might be defaulted and hence using @(<=)@ instead of @min@ on @NoBound@. If so, we're going to lose the ability to get partial information back from @min@ on @AddBounds@. If that's the case, define my own @Ord@ instance instead of deriving. * I'm still having a problem with held-back reactions. Without @mappend@, no problem. With @mappend@, I don't get a reaction to one until I have an occurrence of the other. For instance, with @a mappend b@, if @a@ occurs three times, no reaction. Then when @b@ occurs, all of the @a@ reactions happen (with the correct times). Hm. That sounds like what would happen if I were //not// padding event occurrences. It would also be explained by using @(<=)@ instead of @min@ in @mappend@. Oddly, that may well have been happening before I fixed the definition of @AddBounds@. Hm. There's another layer as well, which is @Max@. ++++ \begin{code} type Time t = Max (AddBounds t) newtype Max a = Max { getMax :: a } deriving (Eq, Ord, Read, Show, Bounded) \end{code} === But @Max@ is a @newtype@, so @min@ is just @min@ on @a@, i.e., @AddBounds (Improving Double)@. * Most things work now. After I fixed the @AddBounds@ issue, the remaining problem is in event tracing itself. I can get the times, but block on the values. * I'm getting a neat bug now. I have a counter example. Starts zero and increments whenever I hit the button. It's incrementing before the event actually occurs. I think it's because there is nothing to force the time. The answer is correct in anticipation. My execution thread consumes the reactive value as fast as it can. When there are two different events, I don't get this race-ahead. I think because then the first output really does depend on the time of first input. 
Funny -- I wonder why it doesn't keep counting forever, in repeated anticipation. Maybe because the definition of @improveMbs@ can't produce a second cons until it finds it's first actual occurrence. ++++ \begin{code} improveMbs :: [(t, Maybe a)] -> [(Improving t, a)] improveMbs = foldr f [] where f (t,Just a ) qs = (Imp [t],a) : qs f (t,Nothing) ~((Imp ts', a) : qs') = (Imp (t:ts'), a) : qs' \end{code} === I could rewrite @improveMbs@ to be more productive, if I wanted. * Made second google account, this time using my conal.net email address. * @traceE@/@traceR@ as a simple @fmap@. No times. Simply @fmap (liftA2 (trace.shw) id)@. Oh! That tracer applies to arbitrary functors. * Consider once again representing events as temporally ordered lists of occurrences. Compose from a sorted-list functor and @FutureG t@. Oh! Now I remember why that idea didn't work out: I can't define @Applicative@ or @Monad@ instances simply because of the @Ord@ constraint on the type parameter. Instead, I could use lists of pairs, sorted by the first value: ++++ \begin{code} newtype AscendK k v = AscendK [(k,v)] -- with Ord k \end{code} === Now to make this work out prettily, I'd like the @k@ type to be in @Ord@, @Bounded@, and @Monoid@. I can ensure these constraints via adding the type wrappers @AddBounds@ and @Max@, in other words, my @Time t@ type. * Maybe I want to pick more general terms to replace "Time" and "Future". * Given this @Ord@ constraint, why not represent events as an efficient balanced a tree form, as in @Data.Map@ or even finger trees. Then I could get reasonably fast random access. But, oops: how would one balance an infinite sequence? * New NVC Evolves post [[Beyond rules and guidelines| http://evolve.awakeningcompassion.com/posts/beyond-rules-and-guidelines/]]. * Added an interface to time sync'ing, so my outputs won't predict the future. * Got Reactive and Phooey mostly working. 
There are still a few funny timing problems with output getting delayed, particularly in dragging. * Switched back to the representation of events as lists of future values. Now I'm right there with the semantics, where I want to be. Broke a lot of my examples, however. Probably due to insufficient laziness. * Various "NVC Evolves" correspondence, including with Jim Manske on [[Using the name "NVC Evolves"| http://groups.google.com/group/nvc-evolves/t/cdea1c3fd93a10ac]] and eZ on [[nvc or cnvc?| http://groups.google.com/group/nvc-evolves/t/5aa6509c956d3b11]] * [[How to Define Keyboard Shortcuts in Emacs| http://xahlee.org/emacs/keyboard_shortcuts.html]]. Explains how to notate key sequences, with which I sometimes struggle. I didn't know about the {{{kbd}}} elisp function. * Reactive paper: ++++ * Working on outline. I'm pretty happy with the intellectual content. * Wrote abstract * Working on introduction === * Neat blog: [[Computational Arts: Activities, News, and Resources| http://runtime.ci.qut.edu.au/]]. It includes [[a post| http://runtime.ci.qut.edu.au/pivot/entry.php?id=69]] on my google tech talk. * Put up new-reactive darcs repo, and sent note to Ivan. * Working on Reactive paper. * Reply to Peter V * Conversations on NVC Evolves * Working on Reactive paper. ++++ * Here's a clearer way to define the semantics of events. Define @Event@ via a wrapper ++++ \begin{code} type EO = [] :. (,) Time newtype Event a = Ev { unEv :: EO a } \end{code} === Then define the semantics of operations on @Event@ by time-sorting the result of those some operations on @EO@. ++++ \begin{code} sortE :: EO a -> Event a sortE (O ps) = Ev (O (sortBy (comparing fst) ps)) instance Functor Event where fmap f (Ev e) = sortE (fmap f e) instance Applicative Event where pure a = sortE (pure a) Ev ef <*> Ev ex = sortE (ef <*> ex) instance Monad Event where return a = sortE (return a) Ev e >>= f = sortE (e >>= unE . f) \end{code} === * Oh. 
Simpler yet: define two semantic functions: ++++ \begin{code} type E a = [(Time,a)] -- sorted occs' :: Event a -> EO a occs :: Event a -> E a occs (O ps) = sortBy (comparing fst) ps \end{code} === * The trick then is to transform the semantics into a tenable implementation of @occs'@, systematically replacing @occ@. === * Working on Reactive paper. ++++ * Idea: formulate Reactive via type product: @type Reactive = Id :*: Event@. What would the standard instances for @(:*:)@ say about @Reactive@? @Functor@ works out fine, but @Applicative@ doesn't. First (dropping isomorphism constructors), @pure a == (a, pure a)@, so we have an initial value and and initial change to that value. Second, @(f,ef) <*> (x,ex) == (f x, ef <*> ex)@, so there are a lot more change events than expected: product instead of sum. OMG! A lot of those events happen simultaneously, so maybe it all comes out the same. If @ef@ has @m@ occurrences and and @ex@ has @n@ occurrences, how many distinct occurrence times can there be for @ef <*> ex@? At most @m+n@. Why? Because the occurrence times are all of the form @tf max tx@ for occurrence times @tf@ of @ef@ and @tx@ of @ex@. Since the maximum of two values is one of those values, there are only @m+n@ possible maxima. * Bumping up to monads, I really can get lots more non-simultaneous occurrences. * Hm. Do I really want to keep events and reactive values separate? Suppose I just use events, which I sometimes interpret as a reactive value. I'd say that the value is undefined before the first occurrence. Then I wouldn't have the redundant occurrence for @pure@. If I want to add an initial value @a@ to an event @e@, use @pure a mappend e@. * The meaning @(f,ef) <*> (x,ex) == (f x, ef <*> ex)@ is also //missing// some transitions. Suppose @ex@ occurs before @ef@. There won't be a transition to @f ex@, because the first occurrence of @ef <*> ex@ coincides with the later of the first occurrences of both @ef@ and @ex@. 
If the second occurrence of @ex@ precedes the first occurrence of @ef@, then that transition will be missed also. There's an easy fix: turn the reactive values into events via the @pure@/@mappend@ trick and combine simply as @ef' <*> ex'@. * In a sense, the left bias of event @mappend@ is really a right bias. The left one occurs first, but the right one, happening second, sticks. * Suppose I define reactive values and events by simple mutual recursion (as in the current Reactive release): ++++ \begin{code} R = Id :*: E E = (T,) :. R \end{code} === What do the instances for type product and composition imply? @fmap@ works fine. I think @pure a@ has infinitely many occurrences at -infinity, all with value @a@. @ef <*> ex@ I think would yield @[(tf1 max tx1, f1 x1), (tf2 max tx2, f2 x2), ...]@, instead of combining all pairs. In other words, I'd get the @ZipList@ AF instead of the list/backtracking AF. I sent a query to Conor & Ross and haskell-cafe asking about a deconstruction of the list AF. * How and why might one use the @Event@ AF? * Where do I introduce future values? They simplify the semantics and semantics-based implementation. And they're motivated by the same. Try right before the semantics section. === * Chat with Shae E (shapr) and Ed Kemmett (edwardk) about HOAS and Ed's use. I uploaded a .tar.bz2 of my old Eros that uses hoas. Ed pointed me to "Boxes goes Bananas" (downloaded). Sounds like the authors came up with a general way to do in Haskell what we did in Lambda-Prolog, with universal quantification and implication. If so, I want to understand it. Saved chats (shapr and scannedinavian). * Reactive paper: ++++ * To give my paper more focus, I could remove the @Applicative@ & @Monad@ stuff, possibly adding it back in a later section. The re-sorting for @Applicative@ is more complicated than for @Monoid@, which just uses @merge@. * Define an @Ascending@ list monoid that does the merge. Define @E a = Ascending (Future a)@ or @E = Ascending :. Future@. 
* Reconsider @E a = Future (Reactive a)@, or @E = Future :. Reactive@. Define @mappend@ using @mappend@/@min@ for @Future@. * Wrote short section on "Deconstructing events", into future values and sorted lists. Made a module {{{Data.Sorted}}}, which has the @Monoid@ instance and classes and instances for @Ord@-enriched functors, AFs and monads. The idea in each case is to do the corresponding method on the lists and sort the result. For @Monoid@, the sort becomes just a merge. For the others, optimization would require additional context from use, as in events. * On //n//th thought, suppose I remove the sortedness from the semantics of events. Just say that the semantics is a list of time/value pairs. For semantics, define @E = [] :. (,) T@ and use the resulting instances. So what's the meaning of @a stepper e@ sampled at @t@? Sort @e@ (stably), and use the value of the latest last future whose time is earlier than @t@. * Rethinking my paper structure //again//. Since I'm keeping the semantics so very simple, give the semantics //as// I'm introducing FRP. When to introduce future values, reactive values, and time functions? How about this: say I'm going to build up from reactive values to reactive behaviors. Develop reactive values and then in a later section show time functions and the composition of reactive values and time functions to get reactive behaviors. Even show the composition up front, with definition of @Fun@ to come later. * Try this outline: ++++ * Introduction * FRP deconstructed: @E = [] :. (T,)@, @B = (T ->)@ * Decoupling continuous and discrete change: @B' = E :. (T ->)@. @Fun@. * Reactive values: @R a = (a, E a)@. Optimize @Applicative@, using @accumE@ and @pairR@. * Future values: data type for @(T,)@. Tricky time bit. 
* Improving values * Improving on improving values * Related work * Limitations and Future work * Acknowledgments === === * Learned on #haskell: ++++> The [[closed world assumption| http://en.wikipedia.org/wiki/Closed_world_assumption]] is the presumption that what is not currently known to be true is false. The same name also refer to a logical formalization of this assumption by Raymond Reiter. The opposite of the closed world assumption is the open world assumption, stating that lack of knowledge does not imply falsity. === Bring out that term next time someone I notice someone on #haskell declaring something to be impossible, just because they can't imagine how it could be done. * How to specify the semantics of @switcher@ and @stepper@? I can define @switcher = (fmap.fmap) join stepper@ or @a stepper e = pure a switcher fmap pure e@. Let's go with @stepper@. Add the initial value or behavior as an event occurrence: @e' = pure a mappend e@. Given a sample time @t@, filter the occurrences of @e'@, keeping just the ones properly before @t@ and pick the value in the last such occurrence. Oh, oops! That definition don't work. The last occurrence in the filtered list might be earlier than some previous occurrences. So first stable-sort the occurrences. ++++ \begin{code} at (a0 stepper e) = \ t -> last [a | (ta,a) <- os, ta < t] where e' = pure a0 mappend e os = stableSort (occs' e') \end{code} === For switcher instead of stepper, I could replace @a@ with @b@ and apply the rhs to @t@. But don't bother, because @join@ does exactly the same thing, semantically. Now, this semantics is pretty simple. And it can lead right into a discussion on implementation. Keep the occurrences sorted. Sort incrementally. Sample monotonically. * Simpler: instead of augmenting @e@ with @pure a@, cons @a@ to the filtered list before extracting the @last@ element. Also do a bit of refactoring. ++++ \begin{code} sortOccs' :: Event a -> [(T,a)] sortOccs' = stableSort . 
occs' occsTo :: Event a -> T -> [(T,a)] occsTo e t = [a | (ta,a) <- sortOccs' e, ta < t] at (a0 stepper e) = \ t -> last (a0 : occsTo e t) = last . (a0 :) . occsTo e \end{code} === * Oh: define @stepper@ in terms of @accumR@, which is defined via @accumE@. ++++ \begin{code} a stepper e = a accumR fmap const e a accumR e = a stepper (a accumE e) \end{code} === Oh -- oops. Circular definitions. Okay, look at an accumulating combinator like @accumE@. I think I have to process process the event argument in occurrence order. ++++ \begin{code} accumE :: a -> Event (a -> a) -> Event a occs (a accumE e) = O (pure a accum fmap (<*>) (sortOccs' e)) \end{code} === * Next, what about reactive values, @R a = (a,E a)@? As explored on [[2008-03-14]], I don't think the @R@ works with my game of deriving semantics right from the model and standard instances. Oh -- maybe I want reactive normal form. Could be ++++ \begin{code} data RNF a = TFun a Switcher Event (TFun a) -- or data HRNF a = TFun a Switcher Event (HRNF a) \end{code} === Either one could be the @Behavior@ representation. If @RNF@, then factor into reactive values and time functions. ++++ \begin{code} type Behavior = R .: TFun data Reactive a = a Stepper Event a rat :: Reactive a -> B a rat (a Stepper e) = at (a stepper e) = \ t -> last (a0 : occsTo e t) at :: Behavior a -> B a at (O rf) = join (apply . rat rf) \end{code} === * What does this normalization accomplish? The goal is to design a data representation that can be stepped through, moving forward in time. Each piece is non-reactive. If constant, then do just one output, and otherwise set up a pull loop, which is the best we can hope for. * Since yesterday, we've been unable to access our domains (on Joseph's server), though other people are able to. I can't get through on FireFox or IE or SecureCRT. Other domains come through fine. Happens on our WinXP machines and on my Ubuntu laptop. Happens with the router as well as wired right to the modem. 
I spent an hour or so on the phone with a helpful Starband tech. He suspected my virus software, but that wouldn't account for the behavior on Ubuntu. Holly tested at Mom's house. Same situation. I called Starband again later. They know about the problem and are working on it. * Paper: ++++ * Where do I go from here? ++++ * Reactive normal form: leading to a data representation for behaviors ++++ * The behavior & event combinators are very flexible. In @b0 switcher e@, the phases (@b0@, ...) themselves may be reactive, either as another @switcher@, or a @fmap@ or @(<*>)@ involving reactive behaviors. Yet, semantically, a behavior built with these combinators must be made of phases of non-reactivity. (Inductive argument, considering the behavior combinators.) Suppose we could represent behaviors in a way that reveals this phase structure * @data RNF a = TFun a Switcher Event (TFun a)@ === * Decomposing behaviors: Reactive values & time functions * Incremental time sorting * Optimizing Applicative on behaviors / RNF: reduce m*n to m+n * Data-driven: how does it relate to data representations? === === * I just realized the Reactive join with the list-of-occurrences event representation doesn't work. For @join ((a Stepper e) Stepper re)@, I have to stop listening to @e@ once @re@ occurs. This @join@ is very important, since it implement switcher. I could fix it with a new event combinator that combines @e@ and @re@ just so. * I also realized that my formulation of @Behavior = Reactive :. Fun Time@ might not be a monad after all. I don't know how to eliminate @Fun Time (Reactive c)@. I can get as far as using the dorp construction from [[Combining Monads| http://www.cs.nott.ac.uk/~nhn/Publications/icfp2005.pdf]]. * Released TypeCompose 0.5 and (old) reactive 0.5 * Gwern B sent some patches for reactive. Hang onto them for when I make another release. Search my mail for from gwern0 to me. * Conversation with Gwern (saved) on GUI libraries. 
A quote: ++++> the point was monads seem to've solved IO and number of other problems, once and for all, so to speak, for haskell === I think Gwern was voicing a widely held belief and one that concerns me: that people think monads solved IO rather than hiding it. Or another take: monads solve the problem of how to do imperative computation (including IO), instead of the more important problem of how //not// to do IO. Doing IO means intractable semantics and hence great loss of formal reasoning. Which is the point of Backus's "can programming be liberated" paper. The disservice of monadic IO is in its superficial convenience via its cosmetic similarlity to functional programming, lulling even FPers into satisfaction. To me, the Haskell community embracing monadic IO is like the Roman Empire embracing Christianity. Popularity increased, but I'm not sure there's much of the original enterprise remaining. * Reactive paper: ++++ * Trying to capture and express the big picture: ++++ * Simple compositional semantics, homomorphic over standard type classes. * Bridge the gap between this semantics and an efficient implementation. ++++ * Sorting and searching in events: sort incrementally, sample monotonically. * Demand-driven sampling: normalize behaviors to reveal phase structure. Each phase is non-reactive and often constant, with an event to deliver a new phase. If constant, no work is done until the event occurs. === === === * Oh, oog. My list-based event semantics has a problem. ++++ * It appends lists and later sorts. I've been assuming @sort (os ++ os') == sort os merge sort os'@. However, if @os@ is infinitely long, then @os ++ os' == os@, so the @os'@ occurrences are lost. Similarly for @Applicative@. Yeeps. * I could go back to my semantics of time-sorted sequences, with its parallel classes for @Functor@, @Applicative@, and @Monad@. Or drop the factoring and define @Event@ semantics directly. 
I couldn't append-then-sort, so I'd have to introduce @merge@ right into the semantics. And much more complex for @Applicative@ and @Monad@. Not very appealing. * How can a keep the semantics simple? Better yet, how can I simplify away the problem with my list-based semantics? * Idea: the semantics of events is a function from time to lists of occurrences. Similar to the Yampa formulation of events as @Maybe@-valued signals and signal transformers. Doesn't have to be list. Could be any monoid. Now, how to define the meaning of @switcher@? What is the latest relevant occurrence? This question came up for me at the very beginning of my FRP work. I wanted to consolidate behaviors and events, and I couldn't quite see how to do it. Funny that it's still here. Would the class instances work out? Yes for @Monoid@ and @Functor@, but I think not for @Applicative@ (and hence for @Monad@). @pure@ makes constant functions, and @Applicative@ combines concurrent values. * Oh, of course! Don't use empty values between occurrences. Hold the value between. Then @Applicative@ has exactly the desired meaning. Of course, I'm talking about //reactive values// here. The events are at the transitions. However, I don't know what a transition means within the function model. What happens when two event occurrences have the same value. For a @()@-valued event, //all// occurrences have the same value. * Idea: augment the time function with a discrete set of times. For @pure@, use {-infinity}. For @(<*>)@, use union. Oh! If @pure@ uses the empty set instead, I have monoidal combination. Use a pair of @(set,fun)@, i.e., @(,) (Set Time) :. (->) Time@. Use the standard @Functor@ and @Applicative@ instances for type composition. Perhaps @Monad@ also in this case. The @Applicative@ instance is a better fit as it doesn't pile up simultaneous occurrences. Does @pure a = O (empty, const a)@ work out? I don't know. Oh, what about the event @Monoid@? How do I combine functions? 
I think I'd have to use the sets, which spoils the orthogonality some. * Another idea: use events as list- or maybe-valued behaviors. Don't have @Applicative@ and @Monad@ instances. Monoid works great. It's the list or maybe monoid lifted through the behavior AF. * How to define the semantics of @switcher@? * Oh, hey. Here's another idea still. Return to defining events as future reactive values. ++++ * De-emphasize semantics (explored elsewhere). Focus on new stuff: RNF, futures, reactive values, application of improving values, new implementation of improving values. * Introduce behaviors and events with informal semantics. Mention that some of the CFRP functions correspond to instances of standard classes. * Decompose reactive behaviors into reactive values and time functions. * Future values with semantics. Take care with @min@/@mappend@. * Decompose events as future reactive values, exactly as in my implementation. === === * Reactive paper: ++++ * Rethinking yet again. ++++ * I can keep the semantics section but replace the events part with an unfactored version (with the time ordering assumption) and without @Applicative@ and @Monad@. The @Monoid@ instance merges temporally. Maybe add @Applicative@ and @Monad@ in a later section. * Then "Intermission: from semantics to efficient implementation". Describe the obstacles to efficient implementation: impossible time comparison; expensive searching in events (non-incremental sampling); pull-based evaluation (not knowing when value changes). * Future values (to begin addressing time comparison). Will be augmented in "Improving values". * Reactive normal form * Decomposing behaviors: reactive values and time functions. * Decomposing events: future reactive value. I'm not sure I want take this approach. I think so, however, as (a) it's cool, and (b) it simplifies the event @Monoid@ and (for later) the event @Monad@. 
* Improving values * Improving on improving values === === * [[Responded to Niklas| http://evolve.awakeningcompassion.com/posts/vague-demands-and-honesty/#comment-191]] on my "NVC Evolves" blog. * Mostly worked on Reactive paper. It's coming together. * Convergence day: vernal equinox, full moon, Good Friday, 2 year anniversary of Dad's transition. * There's quite a lot of live energy in the nvc-evolves and emergence-of-nvc groups. * (I)An-Ok forwarded part of an Essay by Carl Rogers opposing certification and licensure. An excerpt: ++++> As soon as we set up criteria for certification - whether for clinical psychologists, for NTL group trainers, for marriage counselors, for psychiatrists, for psychoanalysts, or, as I heard the other day, for psychic healers - the first and greatest effect is to freeze the profession in a past image. This is an inevitable result. What can you use for examinations? Obviously, the questions and tests that have been used in the past decade or two. Who is wise enough to be an examiner? Obviously, the person who has ten or twenty years of experience and who therefore started his training fifteen to twenty- five years previously. I know how hard such groups try to update their criteria, but they are always several laps behind. So the certification procedure is always rooted in the rather distant past and defines the profession in those terms. === * Reactive paper: ++++ * Find a clear link between the list-of-occurrences model and the future-reactive-value model of events. The former matches a simpler intuition, while the latter seems a bit easier to manipulate. I can translate easily between the two models. Relate to Fran's original "Event" model, which corresponds to my notion of future value. Maybe start by considering @switcher@ and simplify to another === * Reactive paper: ++++ * How do I use these improving times? * Clarify how data-driven. 
++++ * Try this: a function representation suggests pull, i.e., put in a sample time and get out a result. Make the data structure consist of the distinct values (assuming discretely changing). The representation must say when the changes take place, so that behaviors can be combined concurrently (via @(<*>)@). Hence reactive values. Still, what's data-driven here? The implementation steps through a reactive value, blocking until events occur. === * Idea: use nested @max@ calls for future times. For each non-occurrence, throw in another @max@. * Idea for interface: schedule an action at a future time. For reactive values, that action performs one output and schedules another action. === * Reactive paper: ++++ * I want to drop the terms "pull-based" and "push-based" evaluation, in favor of "demand-driven" and "data-driven". Clarify what these latter pair "demand-driven" can mean in the context of functional programming and FRP in particular. Demand-driven is simple enough: each time sample is a demand. Set a target sample/frame rate or just sample as frequently as possible. Lots of functional values get computed, and one output action. Data-driven is usually associated with imperative frameworks: actions (side-effects) are executed in response to an event (new data). Lots of update actions, including one output. What could //functional// data-driven evaluation be? Instead of a function, use a lazy data structure, and start traversing it. As usual, accessing a data structure component returns right away if the component has already been accessed. Otherwise the component gets computed, which delays the access operation. Typically, the delay is short, and there is a single thread of evaluation. Now suppose that the value's computation has not just been postponed as an optimization (laziness), but it depends on information about the future. Then a lazy functional framework can simply wait, as usual, to return a value when it's been computed. 
Because this wait can be substantial, and no amount of computing effort will hasten completion, the implementation will likely want to perform other computations while waiting. === * Test out my new improving values implementation. How to plug it in? The type: ++++ \begin{code} data Improving a = Imp { exact :: a, compareI :: a -> Ordering } \end{code} === For each event occurrence, I just have to come up with @exact@ and @compareI@. For @exact@, use an explicit @MVar@ or maybe a channel with @getChanContents@. For @iv compareI t@, try two tactics in parallel: ++++ * Use the exact value: @exact iv compare t@. * Wait until all time @t@ occurrences have been processed, then confirm that @iv@ is still unknown, and say @GT@. How to wait? Set up a one-shot event in the same single thread as all related event processing. === * Related work: ++++ * "Dynamic optimization for FRP using GADTs": uses a more complex representation, with several special constructors for pattern matching, which also introduces overhead; uses more complex types (GADTs). * "Parallel FRP": introduces nondeterminism into the semantics (really?); uses Linda for concurrency. * "E-FRP": probably the most closely connected, focused on "event-driven" systems; time is handled discretely; events may not occur simultaneously; resource-bounded (as RT-FRP); generates code. === * Working on the Reactive paper. ++++ * Related work. * Monad semantics and implementation for events. === * [[Twitter from Emacs| http://www.emacswiki.org/cgi-bin/emacs/twit.el]] * Reactive: ++++ * Strengthen the tie from semantics to implementation. 
Semantics of @stepper@: ++++ \begin{code} at (a stepper e) = \ t -> last (a : occsTo e t) occsTo :: Event a -> T -> [(T,a)] occsTo e t = [a | (tah,a) <- occs e, tah < t] \end{code} === Switch to an effective definition of @occsTo@: ++++ \begin{code} at (a stepper e) = \ t -> last (a : before (occs e) t) occsTo e = before (occs e) before :: T -> E a -> a -- monotonic -- specification before t os = [a | (tah,a) <- os, tah < t] -- implementation before t os = map snd (takeWhile ((< t) . fst)) os \end{code} === Unrolling and simplifying the definition of @before@: ++++ \begin{code} before _ [] = [] before t ((tah,a):os') | tah < t = a : before t os' | otherwise = [] \end{code} === And then adding the context for @before@: ++++ \begin{code} lastBefore :: a -> T -> E a -> a lastBefore a0 t os = last (a0 : before os t) lastBefore a0 t [] = last (a0 : []) = a0 lastBefore a0 t ((tah,a):os') | tah < t = last (a0 : a : before t os') = last (a : before t os') = lastBefore a t os' | otherwise = last (a0 : []) = a0 \end{code} === Now add the definition of @occs@: ++++ \begin{code} stepperAt :: a -> Event a -> Time -> a stepperAt a0 e t = (a0 stepper e) at t = last (a0 : before (occs e) t) = lastBefore a0 (occs e) t \end{code} === Introducing the @Event@ representation: ++++ \begin{code} stepperAt a0 e t = lastBefore a0 (occs' (-infinity) e) t = stepperAt' (-infinity) a0 e t stepperAt' :: FTime -> a -> Event a -> Time -> a -- specification: stepperAt' t0h a0 e t = lastBefore a0 (occs' t0h e) t -- implementation: stepperAt' _ a0 (Event (infinity,_)) t = a0 stepperAt' t0h a0 (Event (tah, a Stepper e')) t | t1h < t = a | otherwise = stepperAt' t1h a e' where t1h = t0h max tah \end{code} === To simplify, use @rat@ with a reactive value instead of @stepperAt@ with value & event. 
++++ \begin{code} rats :: Event a -> Time -> a rats (a0 Stepper e) ts = (a0 stepper e) ats ts = map (lastBefore a0 (occs e)) ts = map (lastBefore a0 (occs' (-infinity) e)) ts rats' (-infinity) (a0 Stepper e) t \end{code} === * Decompose @occs@. Extract time/value pairs, and then make monotonic. ++++ \begin{code} occs = monotone . timeVals timeVals :: Event a -> [(FT,a)] timeVals (Event (infinity, _)) = [] timeVals (Event (tah, a Stepper e')) = (tah,a) : timeVals e' monotone :: [(FT,a)] -> E a monotone ps = monotone' (-infinity) ps where monotone' _ [] = [] monotone' t0h ((tah,a) : ps') = (t1h,a) : monotone' t1h ps' where t1h = t0h max tah \end{code} === === * Reactive paper: ++++ * Lots of great comments from Mike Sperber, and a pointer to the relevant part of [[his dissertation| http://w210.ub.uni-tuebingen.de/dbt/volltexte/2001/266/]] (section 12.5). * A "monad (homo)morphism" is a natural transformation that preserves the structure of return and join, exactly as in my semantics. Some references: ++++ * [[Monads for functional programming| http://citeseer.ist.psu.edu/wadler95monads.html]] * [[Comprehending Monads| http://citeseer.ist.psu.edu/wadler92comprehending.html]] (called just "monad morphism" in section 6). * [[The Category of Monads| http://incoherantstudy.blogspot.com/2007/11/category-of-monads.html]] === Collectively, I could use the term "(type) class homomorphisms". === * Haskell SDL example (bouncing text screensaver): http://hpaste.org/6743 . * [[Haskell & category theory| http://en.wikibooks.org/wiki/Haskell/Category_theory]]. Monad laws in terms of @join@: ++++ # @ join . fmap join = join . join@ # @ join . fmap return = join . return = id@ # @ return . f = fmap f . return@ # @ join . fmap (fmap f) = fmap f . join@ === * Applicative functor laws: ++++ * //identity//: @pure id <*> v = v@ * //composition//: @pure (.) 
<*> u <*> v <*> w = u <*> (v <*> w)@ * //homomorphism//: @pure f <*> pure x = pure (f x)@ * //interchange//: @u <*> pure y = pure ($ y) <*> u@
*  The Functor instance should satisfy @fmap f x = pure f <*> x@
*  If @f@ is also a Monad, define @pure = return@ and @(<*>) = ap@.
===
* Reactive paper: ++++
*  I put in some "morphism" language.  I'd like to explore this technique more deeply in another paper.
*  I'm not sure the "data-driven" bit is as clear as it might be.  Say that looking at next event occurrence causes the thread to block.  When the event occurs, the thread unblocks.
*  I'm having doubts about the neat trick of representing events as future reactive values.  There's the awkwardness of maintaining temporal monotonicity, which is easy to handle in the list-of-occurrences representation, via the @monotone@ pass in @join@.  Moreover, having the representation correspond to the semantics makes the correctness unmistakable.  I liked how the future-reactive representation simplified the definitions of @(<*>)@ and @join@ for @Reactive@ and of @join@ for @Event@.  See if I can translate the simplicity to the list-of-occurrences representation.  Also, the representation of a never-occurring event is simpler: an empty list.  Oh!  How could event filtering work?  Suppose the filter eliminates the last occurrence, turning a non-empty list to an empty one.  The emptiness test could block, and hold up the comparison with occurrences that happen before the removed occurrence.  Wow -- that's a darn subtle problem!  Explain it in the paper as a reason not to use the list-of-occurrences representation.
* Show that event filtering is easily defined via @(>>=)@.
===
* Reactive paper: ++++
*  Relate to Lula: ++++
*   Carefully examines and addresses issues of events and blocking
*   Determination times
*   Eliminates overhead of event non-occurrences
*   Did not guarantee semantic determinism
*   Inspiration for this design
===
*  Ripped out "Monotonic sampling" and wrote a new version.  Nice & simple!
*  Axed the first detailed figure on improving values.  Refer to Warren B paper instead.
*  Future work: ++++
*   Extensive testing and measurement, particularly in the new implementation of improving values, using |unamb|.
*   Consider a zipper representation for bidirectional sampling.  Efficient in time but still leaky in space.
===
===
* Submitted my ICFP'08 paper, [[Simply efficient functional reactivity| http://conal.net/papers/simply-reactive]].
* Talked with Eric Shippam at [[Communications/Advantage| http://communicationsadvantage.net]].  They're bringing broadband wireless into local rural areas, including ours.  Sent them our house's GPS coords.
* Poking around at [[jsMath| http://www.math.union.edu/~dpvc/jsmath]].  Get LaTeX-specified math into web pages.  If the user installs TeX fonts, the result zooms with the text and looks great on the screen and paper.
* Relearning to profile ghc packages. Added {{{--enable-library-profiling}}} to my-cabal-make.inc.  Did TypeCompose and simply-reactive.  Next: wxhaskell.  Try out the new cabal version on hackage, when [[hackage| http://hackage.haskell.org/]] is back up.
* Blogging: ++++
*  [[Point to my icfp submission| http://conal.net/papers/simply-reactive/]]
*  Type class morphisms
*  Type composition and class morphisms
*  Latency and frp
*  Unambiguous choice -- a tool for semantically determinate concurrency
===
* Upgraded my [[technical blog| http://conal.net/blog]].  Got into terrible trouble with the "maintenance mode" plugin, which locked me out of my blog.  Got through it, though traumatized.
* cnvc-cert conversation about the term "triggered".
* Writing blog post "Fun with type class morphisms".
* Processing comments on my paper.
* Thoughts on type class morphisms and type composition: ++++
*  My formulation of @at@ could be a bit simpler. ++++
\begin{code}
at (O rf) == join (fmap apply (rat rf))
== rat rf >>= apply
\end{code}
===
===
* Some [[FRP discussion on Lambda-the-Ultimate| http://lambda-the-ultimate.org/node/2756]].
* Posted [[Simplifying semantics with type class morphisms| http://conal.net/blog/posts/simplifying-semantics-with-type-class-morphisms/]].
* Chatted with Christophe (vincenz) about [[Simplifying semantics with type class morphisms| http://conal.net/blog/posts/simplifying-semantics-with-type-class-morphisms/]].  Christophe and another reader had missed the distinction between the @Behavior@ type and its semantics.  I added a (hopefully clarifying) comment to the post.
* Spent some time tracking down a problem with my blog.  The block features of markdown don't work for other people leaving comments.
* Started writing blog post "Composition and type class morphisms".  Maybe I'd better write a post about type composition first and then bring together type composition and type class morphisms.
* Trying out {{{cabal install}}}.  Changing my {{{c:/Documents and Settings/Conal/Application Data/cabal/config}}}: ++++
*  {{{compiler}}}: was "GHC", now "ghc"
*  {{{cachedir}}}: was "C:\\Documents and Settings\\Conal\\Application Data\\cabal\\packages", now "c:/Haskell/packages/"
===
* Fifteen puzzle: ++++
*  Each tile can see in four directions, distinguishing free vs occupied.
*  How to move pieces?  Some possibilities: ++++
*   Arrow key.  If a tile can move in that direction, it does.  Maybe broadcast the direction to all the tiles, and let each decide whether to move.  Or "tell" a specific tile when it is to move, and never mind about the tile's four-directional vision.  By telling a tile, I mean that the tile's index occurs in an event occurrence.
*   Mouse click.  If the selected tile can move, it does.  Each tile feels the mouse click only when in the tile's region, and can highlight in some way.  Perhaps highlighting differently, depending on whether or not the tile can move.  Use an overlay that shows whether the tile can move and, if it can, in which direction.
*   Mouse drag.  If the selected tile can move in the dragged direction, it does.
=== The first two are probably the most convenient.  The third could be cool for a different game, perhaps Sokoban.
*  I'd like to make the tiles look good: use beveled and/or smoothed edges and maybe a neat bump-mapped texture.
*  For environmental information, I'll want some combinators that I couldn't implement at all efficiently in a demand-driven FRP engine.  In particular: ++++
\begin{code}
arrayE :: Ix i => (i,i) -> Event (i,a) -> Array i (Event a)
\end{code}
=== Similarly for other indexed structures, like maps and various kinds of trees.  Can I implement @arrayE@ efficiently in Reactive?  I want a small amount of work done per occurrence, independent of the size of the array.
*  Track the location of the blank.  Snapshot that location with the @move@ event, which carries a direction.  Then determine which of the 16 locations, if any, is next to the blank in the opposite direction.  Then which tile is currently in that location, yielding an @Event (Loc,Tile)@, which is fit for @arrayE@.
*  Backing up, how can I define which tile is at a given location?  Suppose I have an @Array Loc (Reactive Tile)@.  I want a combinator something like ++++
\begin{code}
(!*) :: Event (i,a) -> Array i (Reactive b) -> Event (a,b)
\end{code}
=== Hm.  I'd worry about time/space leaks with @Array i (Reactive b)@.  Alternatively, I could use @Reactive (Array i b)@, and I wouldn't need @(!*)@.  But how can I maintain that array efficiently?  Ideally, I'd maintain it destructively, which I don't know how to do safely.  (Consider @snapshot@.)
*  Would an @Arrow@ interface help?  It would prevent any code from hanging onto the array of signals.
===
* Perhaps the reason that we can move forward in time but not backward is simply that our Reality was easier and/or cheaper to implement that way.  Ditto for remembering the past and not the future.  For instance, a zipper representation for reactive behaviors could easily give access to the past, even efficient access to the //recent// past.  But it would cause a huge space leak.
* On memory (and FRP): We respond not to the past, but to our present (filtered) memory of the past.  Nice trick, allowing the distributed representation and efficient memory management.
* The FRP semantic models are much more expressive than necessary, and more expressive than can be implemented reasonably well.  In classic, pre-arrow FRP, one difficulty is time transformation of behaviors that depend on external input.  In a sense, it's possible to implement, though the memory and latency requirements grow without bound.  In arrow-based FRP, the @Signal a -> Signal b@ model has the same issue.  If time transformation is not implemented, then we're not using the full semantic model, which then fails to be //[[universal| http://www.inf.ed.ac.uk/teaching/courses/fpls/note15.pdf]]//.  In other words, the model contains //junk//, i.e., values that are not the denotation of any expressible behavior.
* So, what are some more restrictive models, particularly ones that align with our (apparent) reality?  The classic Fran/FRP model requires (encourages?) magic/ESP in that behaviors know what other behaviors are up to.  That magic breaks temporal and spatial modularity of interaction.  Transforming an input-dependent behavior in space or time cannot automatically inversely transform the input, while the signal-function model can, which is the main strength I see in the latter model.  Could we replace @Signal a -> Signal b@ simply with @a -> b@?  I don't think that model covers integration or reactivity.  Then there's a differential approach: @a -> b'@ where @b'@ is some sort of derivative type for @b@, but some types don't have derivatives.  Perhaps combine these types of behaviors in a sum, or generalize the notion of derivative to handle both cases and discrete reactivity (say via Dirac impulses).  I'm intrigued with the idea of defining //all// values differentially.  It'd certainly encourage animation.  Discrete types (e.g., @Bool@) would require some special treatment.  (Perhaps replace @if-then-else@ with a slow-in and slow-out interpolation.)  What would be a nice generalization of @integral@ and @switcher@ or @accumR@?  Note that they all take an initial value and a means of changing the value, either discretely or continuously.
* The [[Haskell Symposium| http://haskell.org/haskell-symposium/2008]] submission deadline is June 23.  I wonder what I might write up?  Maybe the representation I was playing with for functions over continuous domains, including images.
* Fifteen puzzle / FRP: ++++
*  Every tile is described by the same bot (signal function), which maps a localized universe (in which the tile is always at the origin) to a location.
*  The location comes from integrating a velocity.  The velocity could be finite (e.g., linear or slow-in/slow-out) or have (infinite & instantaneous) impulses.
*  The location is used, inversely, to transform the puzzle's universe into the tile's perspective.
*  That universe can simply be a function from relative locations to contents.  Each tile sees itself at the center/origin of the universe.
*  Decompose the tile description into an outer perceptual layer and an inner logic.  The perceptual layer filters the tile-localized universe into much less information: the free/occupied status of each of its four neighbors in 2D, and a tapping impulse from above (user click).  That filtered information is fed to the logic layer.
*  I like Henrik's idea of using impulses in place of events, so that a single integral can yield piecewise-continuous results (possibly only discretely changing).  Consider how the notion could be generalized.  For instance, when I type, each character has a differential effect on my buffer.
*  Design the behavior data type so that impulses can occur only discretely.
===
* Fifteen puzzle / FRP: ++++
*  Go for continuous tile movement with slow-in/slow-out.  Use a spring model: each tile has a current position (and velocity) and a goal position.  Usually @current == goal@, but sometimes not, when a tile is in motion.  For the 15 puzzle, the goal changes step-wise.
*  Another idea: when the user clicks on a tile, a spring connection is created, which lasts until the tile is released.  The spring force
*  Keep delivering the spring's anchor location relative to the (sometimes moving) tile.  In the ODE then, there's no need to subtract the tile's position from its goal.  (The tile's position is always zero, relative to itself.)  Oh -- maybe a subtraction to account for grabbing it off center.  When neighboring tiles fully or partly block the given tile, an additional force counteracts the spring force.  Alternatively, project the acceleration vector onto the unit vector in the direction of the neighboring blank if any (replacing negative components with zero).
===
* The intractable denotational model I think of for @IO a@, namely @U -> (a,U)@ (where @U@ is the physical universe), is not nearly expressive enough to account for concurrency, which brings in the powerset of transition sequences.  How can anyone expect to write correct concurrent imperative programs?
* Another free wifi place in Sonora: HavaJava.
* Animated illustration project: ++++
*  Chatted with Andy Gill.  We're starting on a new library for animated illustration (2D & 3D), probably with some interaction as well.
*  Some sources of examples: ++++
*   [[A PGF and TikZ examples gallery| http://www.fauskes.net/nb/pgftikzexamples/]]
*   [[Graphviz gallery| http://www.graphviz.org/Gallery.php]]
*   [[Project mathematics videos| http://www.projectmathematics.com/]]
===
*  To start, we're going to play with some charts made of boxes, arrows, and text.  First [[Simple flow chart| http://www.fauskes.net/pgftikzexamples/simple-flow-chart/]].  I'm thinking we can do the graph layout using masses & springs, with specified rest lengths & angles.  Plop down the components at random (or at the origin) and watch them wiggle around.  Allow the user to move and/or twist.  Even the edges could have interesting physics, e.g., a curvable rod.  Play with interaction ideas.
*  Idea for numeric integration: use a rate-based scheduler.  Larger derivatives get stepped more often.  As a special case, near-zero derivatives wait almost forever to get scheduled.  Meanwhile, a derivative value can get updated thanks to //its own// derivative being stepped or some other specification, which causes its next integration step to get rescheduled.
===
* Playing with chart specifications. ++++
*  First cut is modeled after Pgf, wrapping Haskell-style.  The [[simple flow chart| http://www.fauskes.net/pgftikzexamples/simple-flow-chart/]] might become something like the following: ++++
\begin{code}
import Pgf

-- block styles
decision = diamond   %+ draw %+ fblue %+ text_width (em 4.5) %+ text_badly_centered
%+ d3 %+ inner_sep (pt 0)
block    = rectangle %+ draw %+ fblue %+ text_width (em 5) %+ text_centered
%+ rounded_corners %+ minimum_height (em 4)
cloud    = ellipse   %+ draw %+ fred %+ d3 %+ minimum_height (em 2)
line     = draw      %+ no_latex'

-- fills
f20 col = fill (col!20)
fblue = f20 blue
fred  = f20 red

-- Position further for decision node
d3 = node_distance (cm 3)

-- nodes
init     = node block "initialize model"
expert   = node (cloud %+ left_of init) "expert"
system   = node (cloud %+ right_of init) "system"
identify = node (block %+ below init) "identify candidate models"
evaluate = node (block %+ below identify) "evaluate candidate models"
update   = node (block %+ left_of evaluate +% d3) "update model"
decide   = node (decision %+ below evaluate) "is best candidate better?"
stop     = node (block %+ below decide +% d3) "stop"

-- edges
edges = map pathl [
init --> identify
, identify --> evaluate
, evaluate --> decide
, decide -| node near_start "yes" update  -- what to do here??
, update |- identify
, decide --> node mempty "no" stop
] ++ map pathl' [
expert --> init
, system --> init
, system |- evaluate
]

pathl  = path line
pathl' = path (dashed %+ line)

flowChart =
chart [init,expert,system,identify,evaluate,update,decide,stop] edges
\end{code}
=== I'm using @(%+)@ as a short-hand synonym for @mappend@ on styles.  I've mimicked the PGF specification as closely as I could.  This operation could simply be reverse function composition.
*  Another idea: move the edges into the source nodes.  (Could as well place in targets.)  A fairly superficial change, I think.
*  Another approach: masses and springs (linear and angular).  Some edges have strong preferences about their lengths, while others don't care.  Every connection point of a node has a force going in and a position coming out.
===
* Imported posts from my old Blogger/Blogspot blog and hand-tweaked some strange bits.
* [[Markdownify: The HTML to Markdown converter for PHP| http://milianw.de/projects/markdownify/]].  Oh yeah, [[pandoc| http://johnmacfarlane.net/pandoc/]] can do that too: {{{pandoc --from=html --to=markdown}}}.
* Illustration library: ++++
*  I think I know how to handle masses & springs: model each rigid component as a function from force on it to position (say center).  Add more structure by composing with one function that sums forces applied to connection points and another that turns a single position into a position for each connection point.  Later, add torque.
*  Model edge corners as degenerate rigid objects, with zero size and mass.
*  Worked more on a PGF-like specification of graphs.  Going fairly well, though I think I'll prefer the mass & spring model.
===
* To me, pure functional programming means that all effects are hidden inside of the implementation of values.  These hidden effects include laziness, in which an evaluated thunk replaces the thunk.  Also pushing data onto call frames, for communication between functions.  Evolving the functional paradigm in the direction of purity requires addressing I/O.  The popular monadic solution safely separates effects from functional code, but leaves the effects in the user model, where their complex (denotational) semantics (especially with concurrency) thwart simple and powerful reasoning and lead to very weak composability, as discussed by John Backus ("Can programming be liberated ...").  So monadic IO is not so much functional programming as a way for imperative and functional code to coexist in programs, while preserving simple (denotational) semantics of the functional part.  How else might we "address" I/O?  By seeing it as an implementation of something functional, such as persistence (for file writing/reading) and function-call data marshaling (for human I/O).
* Responded to [[a question on my blog| http://conal.net/blog/posts/simply-efficient-functional-reactivity/#comment-3059]] about how to make externally-sourced future times.
* Great visit in San Francisco with Peter & Rudy from Anygma/Nazooka.  Details pending.
* Chats with Ivan.
* I want to pull together thoughts on issues with arrowish vs classic frp style interface, especially in the context of a non-programming authoring tool.
* Masses & springs: ++++
*  I don't know how to give an elegant, non-redundant specification of an undirected graph of masses and springs.  Here's a formulation that uses keys/labels for the masses, which are then mentioned as spring end-points.  For simplicity, I'll assume standard masses and initial velocities, providing only initial positions. ++++
\begin{code}
graph [('a',a0), ('b',b0), ('c',c0)] [('a','b'),('b','c')]
\end{code}
=== Or replace @a0@ etc with fuller specifications, including mass and appearance.  Btw, where do we get the initial positions?  Perhaps choose arbitrary distinct values, say distributed around a small circle.  More general graph: ++++
\begin{code}
graph :: Ord k => [(k,v)] -> [(k,k)] -> Graph k v
\end{code}
=== The @Ord@ constraint is simply so that I can use a finite map for efficient lookup.  Otherwise, I'd just assume @Eq@.
*  I'm not sure I really want @k@ in the @Graph@ type.  Maybe hide it.  Depends on what graph accessors are useful.
*  I like this separation between a general graph specification and the physical interpretation.  Also, the graph could be interpreted as directed or undirected.  For this use, undirected, since forces come in opposite pairs.  Here's an idea for yet another intermediate step, generalizing from forces.  Make a graph whose elements are functions from connected values to values. ++++
\begin{code}
tieGraph :: Graph k ([v] -> v) -> [(k,v)]
\end{code}
=== Maybe a specialized/optimized version for symmetric situations like force pairs: ++++
\begin{code}
tieSymG :: Num u => (v -> v -> u) -> Graph k (u -> v) -> [(k,v)]
\end{code}
=== For instance, @v@ is position, velocity, and mass, and @u@ is force.  We could almost use @Monoid@ instead of @Num@ here, but we need an inverse (@negate@).
*  Oh, oops -- what about spring stiffness?  Could place in the edges, as "labels".  Each edge label could be the function that maps two vertex values to a force. ++++
\begin{code}
graph :: Ord k => [(k,v)] -> [(k,k,e)] -> Graph k v e

tieSymG :: (Ord k, Num u) => Graph k (u -> v) (v -> v -> u) -> [(k,v)]
\end{code}
=== For our use, the edge function would include the spring rule with stiffness and rest length.  I think we'd drop the keys in the result (via @map snd@).  Or maybe drop them in @tieSymG@ and even the @Graph@ type.  I like this separation: having the edges know about springs and the vertices know about masses and forces.
*  Packaging up for convenience, compute motion from initial positions & masses: ++++
\begin{code}
springs :: Map k (V,R) -> [(k,k,R)] -> Map k (B V)
\end{code}
=== Keep the keys for correlating the resulting motions with additional info about the vertices.
*  Small variation: Put edges into the map.  Also, abstract out a graph type. ++++
\begin{code}
type Graph k v e = Map k (v,[(e,k)])

springs :: Graph k (V,R) R -> Map k (B V)
\end{code}
===
* Here's an idea for eliminating the labels: ++++
\begin{code}
-- Graph: a--b--c, all with default mass, stiffness, and rest length
abc = (n a0,[]    ) <@> \ a ->
(n b0,es [a]) <@> \ b ->
(n c0,es [b]) <@> \ _ -> empty
-- defaults
label = (,)
n   = label 1                         -- mass
e   = label (1,1)                     -- stiffness & rest length
es  = map e
\end{code}
=== This representation is very close to Martin Erwig's [[inductive graph| http://web.engr.oregonstate.edu/~erwig/papers/abstracts.html#JFP01]] formulation with a bit of abbreviation for symmetric graphs and a lambda trick to avoid the labels.  I think it could be mapped onto Martin's graph type by managing a counter for automatic node number generation.
*  More complex example: ++++
\begin{code}
--   a--b--c
--      |  |
--      d--e
abcde = (n a0,[]      ) <@> \ a ->
(n b0,es [a]  ) <@> \ b ->
(n c0,es [b]  ) <@> \ c ->
(n d0,es [b]  ) <@> \ d ->
(n e0,es [d,c]) <@> \ _ -> empty
\end{code}
===
\begin{code}
abcde :: G v e ()
abcde = do a <- node' a0 []
b <- node' b0 [a]
c <- node' c0 [b]
d <- node' d0 [b]
e <- node' e0 [d,c]
return ()

node' :: v -> [Node] -> G v (R,R) Node
node' v ns = node es v es where es = map e ns

node :: [(e,Node)] -> v -> [(e,Node)] -> G v e Node
\end{code}
=== What might the @G v e@ monad be?  It has a node counter state for generating new @Node@ values and it builds up a FGL graph as it goes. ++++
\begin{code}
newtype G v e a = G (State (Node, Gr v e) a)

node pre v suc = State (\ (n,g) ->((n+1, (pre,n,v,suc) & g),n))

runG :: G v e a -> (Gr v e, a)
runG (G st) = snd (evalState st 0)
\end{code}
=== BTW, keep the @Node@ type abstract.  Probably just a @newtype@ wrapper around @Int@.
===
* "[[VisualComplexity.com| http://VisualComplexity.com]] intends to be a unified resource space for anyone interested in the visualization of complex networks."
* Masses & springs: ++++
*  Idea: use @mdo@ to specify arbitrary directed graphs specifying only successors, rather than some successors and some predecessors. ++++
\begin{code}
--   a-->b-->c
--       ^   |
--       |   V
--       d<--e
abcde = mdo a <- node' a0 []
b <- node' b0 [c]
c <- node' c0 [e]
d <- node' d0 [b]
e <- node' e0 [d]
return ()
\end{code}
===
*  Then again, I could separate edges from nodes: ++++
\begin{code}
abcde = do a <- node a0
b <- node b0
c <- node c0
d <- node d0
e <- node e0
edge a b
edge b c
edge c e
edge e d
edge d b
return ()
\end{code}
=== Make this last form primitive, and use it to define the node-with-predecessors-and-successors and node-with-successors forms.  Simple.
*  Could use a prettier operator for edges, e.g., @a --> b@ or @a to b@.
*  Or some explicit layout in the edges, e.g., saying which way to go.  For instance, ++++
\begin{code}
abcde = do a <- node a0
b <- node b0
c <- node c0
d <- node d0
e <- node e0
a toE b
b toE c
c toS e
e toW d
d toN b
return ()
\end{code}
=== It's easy to over-specify layout in this way.  If we use angular springs, inconsistent specifications will tug at each other.
*  For edges that (visually) turn corners, use @toSW@, for instance.  Maybe insert invisible nodes.  For instance, ++++
\begin{code}
a toSW b = do c <- invisibleNode
a toS c
c toW b
\end{code}
=== More generally, ++++
\begin{code}
toDirs :: [Dir] -> Node -> Node -> G n () ()

toDir  :: Dir -> Node -> Node -> G n () ()
toDir = toDirs . pure

toW = toDir west
-- etc
\end{code}
=== Generalize @toSW@ with a higher-order formulation: ++++
\begin{code}
type Edger = Node -> Node -> G n () ()
type Binop a = a -> a -> a

(&) :: Binop Edger
(ab & bc) a c = do b <- invisibleNode
a ab b
b bc c

toSW = toS & toW
toWN = toW & toN
toNE = toN & toE
-- etc
\end{code}
===
*  Of course, we really want decorated edges (@e /= ()@).
*  What info goes into edges?  Visual style (solid/dashed, arrow shape), rest length & direction, linear and angular stiffness (resistance to deviating from rest).  What goes into nodes?  Visual style (shape, rounded or angular corners, background & foreground colors), text/content, initial position/velocity, mass (resistance to force).
*  What uses can we make of the @G@ monad's return value?  One idea is one or more nodes that form the interface to a graph fragment.  What else?
* I like the infix style for edges (e.g., @toSW@), but I think it thwarts compositionality.  For instance, how might I specify a //dashed// SW edge?  Well, perhaps I don't really have to.  Suppose some of the rendering were to use information about the node endpoints.  For example, edges emanating from 'source' nodes are dashed.  Suppose also that the edges leaving a decision node are somehow ordered and then labeled (yes/no), depending on the order.  Look for some separation of logical structure from style (model vs presentation).  Imagine a renderer taking a graph //and// a specification of style rules.
===
Diff arrays have an immutable interface, but rely on internal updates in place to provide fast functional update operator @//@.
When the @//@ operator is applied to a diff array, its contents are physically updated in place. The old array silently changes its representation without changing the visible behavior: it stores a link to the new current array along with the difference to be applied to get the old contents.
So if a diff array is used in a single-threaded style, i.e. after @//@ application the old version is no longer used, @a!i@ takes O(1) time and @a // d@ takes O(@length d@). Accessing elements of older versions gradually becomes slower.
Updating an array which is not current makes a physical copy. The resulting array is unlinked from the old family. So you can obtain a version which is guaranteed to be current and thus have fast element access by @a // []@.
===
* Yesterday, Holly & I got a second weed-eater.  It's the best one we've had: two-handled, with a powerful engine and heavy line.  With the padded harness/vest, the weight distributes well on my skeleton, so I'm more comfortable than with the lighter weed-eater.  We weed-ate the labyrinth, surrounding area, and the walking path from the road to the labyrinth.  If we work this hard most mornings, I think we'll get a big energy boost.
* Elegant refactoring of STM-based reactivity, in terms of IVals.  Started reworking the Reactive implementation.
* Reactive: ++++
*  Rethinking the @IVal@ interface, given input from Chris Kuklewicz.  Eliminate the style of primitive I've been using for futures and ivals, which yields a sink.  For ivals, replace the old primitive with the function I've been using for caching.  Renamed: ++++
\begin{code}
ival :: STM a -> IVal a
\end{code}
===
*  Implement futures by composing @IVal@ and semantic futures: ++++
\begin{code}
type FutureG t = IVal :. S.FutureG t
\end{code}
*  Got all of Reactive rebuilt and compiling on top of this new representation of futures.  I want a simple way to test.
*  Oops -- Ryan Ingram pointed out that our STM solutions have a serious flaw: any @retry@ will undo caching.
===
* Put together several modules for functional 3D geometry, with OpenGL rendering.
* Brent Yorgey has a [[New Haskell diagrams library| http://byorgey.wordpress.com/2008/04/30/new-haskell-diagrams-library]] built on Cairo.  Nice stuff.  Pretty examples!
* I'm finally installing gtk2hs (and prerequisites), so I can try Brent's library.
* Working on installing and building gtk2hs.  Lots of difficulties.   The one I'm stuck on now is when {{{make}}} wants to call {{{hsc2hs}}} with Cygwin-style paths.  Maybe Cygwin is no longer a viable environment for working with GHC.  So I grabbed the [[binary installer| http://haskell.org/~duncan/gtk2hs/gtk2hs-0.9.12.1.exe]], but it wants {{{ghc-6.8.2}}} while I have {{{ghc-6.8.2.20071221}}}.
* Thoughts on safe side-effects: ++++
*  One way to represent a pure value is via a mutable cell for which the only permitted assignments are with semantically equal values.  For example, consider a functional representation of improving values, with a semantic function: ++++
\begin{code}
newtype Improving a = IV [a] -- non-decreasing

meaning :: Improving a -> a
meaning (IV xs) = last xs
\end{code}
=== The next step is to identify one or more transformations on the representation that preserve semantics.  In this case @meaning (IV (a:b:cs)) = meaning (IV (b:cs))@.  The transformation has a cost and benefit: it loses some laziness and speeds up future inquiries.  So, whenever we've paid the cost anyway, we'll want to reap the benefit.  Specifically for @Improving@, when an operation requires looking at the second element of the list representation, discard the first.
*  This technique is at work inside lazy data representations in general.  A pure value is represented as a cell holding a thunk.  When more information is needed, the thunk is evaluated, resulting in a shell data structure (WHNF) around some more thunks, which is then stored in the cell, overwriting the thunk.  I'm suggesting generalizing the mechanism and allowing more instances of the generalization to be implemented on top of Haskell, probably with a proof obligation of semantic equivalence.
*  For future times, where would the successive approximations come from?  I don't want to eagerly inject a stream of non-occurrences.  So I don't really want to use the list-of-approximations representation.
===
* Mom showed me an Arthur Young video on the toroidal universe (from [[this collection| http://www.arthuryoung.com/archive/seminarone.html]]).  Since then, I've been imagining flowy 3D animations placed on the surface of a torus and other curvy 3D shapes.
* Thinking about how to specify and render 2D and 3D geometry.  I want the specifications to be the ideal shapes, but rendering will approximate with sets of primitives (triangles and quads). ++++
*  Idea: represent a shape exactly as the union of a sometimes-infinite stream of primitives.
*  When rendering, blast out only as many triangles as we have time for.  Place the bigger primitives first, so that later triangles contribute less and less.  Perhaps track the error, so we can make intelligent decisions.
*  Given a bunch of (ideal) shapes, rendering could go round and round, taking contributions from each.
*  Hm.  The ways I can think of to add detail have exponentially diminishing contributions of exponentially increasing numbers of primitives.  For instance, approximate a half-disk first as a single inscribed right isosceles triangle.  Then as two shallower triangles, one on each leg of the original.  Then four more shallower still, etc.
*  Another approach is to represent a surface as a function from some sort of accuracy specification to a finite set of primitives.  For instance, a number of facets, or ideally some kind of error bound.
*  Whatever the specification is, I want to do something sensible in the presence of spatial transformations.  For instance, specify the error bound in screen coordinates.
*  Oh yeah.  This general idea is called "dynamic tessellation".  Salim worked on it for trimmed NURBS at Sun, with Leon and Srikanth.
===
* Correspondence on the [[NVC Evolves group| http://groups.google.com/group/nvc-evolves]], speculating on needs behind certification.
* More functional 3D stuff.  Wrapping some of OpenGL GLU's handy tessellators.
* Fun progress on functional 3D.  Working on a tablet with rounded corners.  For fun, make the sides fully rounded, and use spheres for the four corners.  For now, I'll use full spheres and cylinders on the inside, though I waste 1/2 of the cylinder surface and 3/4 of the sphere surface.  Maybe find or build a version of cylinder and sphere that take angle ranges.
* For building my own curved surfaces, use automatic differentiation, for tangent vectors, from which I can make normals.
* Finished two versions of Andy's tablet.  Made lots of improvements in the process.
* Playing with derivatives and parametric curves (in 2D and 3D) and surfaces. ++++
*  In Vertigo, I made curves and surfaces and then extracted derivatives.  Since I was generating code, I had access to a symbolic representation of these functions.  Instead, I want to build the derivatives (infinite towers of them) into the range type. ++++
\begin{code}
type a :--> b = ... -- linear maps
newtype a :~> b = DFun (a -> (b, a :--> b))  -- function with derivative.
\end{code}
===
*  What's this linear map type?  It doesn't have a purely parametric representation, so I'll use a data type family. ++++
\begin{code}
-- | Provide a representation for linear maps from a to o.
class LinearMap a o where
-- | Linear map
data a :--> o :: *
-- Apply
lmap :: (a :--> o) -> (a -> o)

-- Standard instances for scalar domains
instance VectorSpace o Float => LinearMap Float o where
data Float :--> o = FloatLM o
lmap (FloatLM o) = (*^ o)

instance VectorSpace o Double => LinearMap Double o where
data Double :--> o = DoubleLM o
lmap (DoubleLM o) = (*^ o)

instance (VectorSpace o s, LinearMap a o, LinearMap b o)
=> LinearMap (a,b) o where
data (a,b) :--> o = PairLM (a :--> o) (b :--> o)
PairLM ao bo `lmap` (a,b) = ao `lmap` a ^+^ bo `lmap` b

instance (VectorSpace o s, LinearMap a o, LinearMap b o, LinearMap c o)
=> LinearMap (a,b,c) o where
data (a,b,c) :--> o = TripleLM (a :--> o) (b :--> o) (c :--> o)
TripleLM ao bo co `lmap` (a,b,c) =
ao `lmap` a ^+^ bo `lmap` b ^+^ co `lmap` c
\end{code}
=== When type synonym families are working in a released ghc, switch to them.
*  Adding three more methods allows me to define @a :--> o@ as a vector space.
*  Oh -- that function-with-derivative type has only the //first// derivative.  I'd like infinitely many.  Jerzy's formulation of AD (automatic differentiation) identifies @a :--> b@ with @b@, which only works for scalar @a@.  Jeff Siskind and Barak Pearlmutter published some relevant work.  See Barak's comment on [[Lennart Augustsson's blog post| http://augustss.blogspot.com/2007/04/overloading-haskell-numbers-part-2.html]].
*  I want derivatives so as to find normal vectors.  For a surface, one usually takes the cross product of two partial derivatives (and then normalizes).  If I think of the pair of partial derivative vectors (equivalently, the Jacobian matrix) more abstractly as a single linear map, then what notion does the cross product implement?  Got some help on the #math freenode IRC.  I think the key is the notion of [[dual space| http://en.wikipedia.org/wiki/Dual_space]] //V*// of a vector space, which "consists of all linear functionals on //V//".  Per Wikipedia, "a [[linear functional| http://en.wikipedia.org/wiki/Linear_functional]] or linear form (also called a one-form or covector) is a linear map from a vector space to its field of scalars."  Is a normal (co)vector a linear functional then?
===
* Derivatives: ++++
*  Here's the structure I'm trying for infinite derivative towers, using linear maps as derivative values.  Here @a :~> b@ contains infinitely differentiable functions; @a :> b@ contains an infinite chain of derivatives; and @a :--> b@ is a linear map from @a@ to @b@: ++++
\begin{code}
type a :~> b = a -> a:>b

data a :>  b = D b (a :> (a :--> b))
\end{code}
===
*  I haven't been able to get my @LinearMap@ module quite as I want.  I got stuck on typing when defining an identity linear map, and covering pair domains.  I think I want an identity to generalize the (scalar) derivative value 1, just as the zero linear map generalizes the value 0.  How did I get stuck?  I'm representing linear maps via a type family indexed on the domain type.  My representation of linear maps from @(a,b)@ is a pair of linear maps, one from @a@ and one from @b@.  For instance, @R2 :--> R3@ is represented by @(R :-->R3, R :-->R3)@.  To apply such a pair map, I apply each map and add the results.
*  How can I handle composition of linear maps?
\begin{code}
data a :> b = D b (a :--> b)
\end{code}
=== Write down the chain rule: ++++
\begin{code}
infix 0 >-<
(>-<) :: VectorSpace b s => (b -> b) -> (b -> s)
-> (a :> b) -> (a :> b)
f >-< d = \ (D u u') -> D (f u) (d u *^ u')
\end{code}
=== Then use function-level overloading of numeric classes to make the derivative functions easy to specify. ++++
\begin{code}
instance (Num b, VectorSpace b b) => Num (a:>b) where
fromInteger = dConst . fromInteger
D u u' + D v v' = D (u + v) (u' ^+^ v')
D u u' * D v v' = D (u * v) (u' ^* v ^+^ u *^ v')
negate = negate >-< -1

instance (Fractional b, VectorSpace b b) => Fractional (a:>b) where
fromRational = dConst . fromRational
recip        = recip >-< recip (^2)

instance (Floating b, VectorSpace b b) => Floating (a:>b) where
pi    = dConst pi
exp   = exp   >-< exp
log   = log   >-< recip
sqrt  = sqrt  >-< recip (2 * sqrt)
sin   = sin   >-< cos
cos   = cos   >-< - sin
sinh  = sinh  >-< cosh
cosh  = cosh  >-< sinh
asin  = asin  >-< recip (sqrt (1-(^2)))
acos  = acos  >-< recip (- sqrt (1-(^2)))
atan  = atan  >-< recip (1+(^2))
asinh = asinh >-< recip (sqrt (1+(^2)))
acosh = acosh >-< recip (- sqrt ((^2)-1))
atanh = atanh >-< recip (1-(^2))
\end{code}
===
===
* Derivatives again: ++++
*  Blog post [[Beautiful differentiation| http://conal.net/blog/posts/beautiful-differentiation/]].
*  I know how to do derivative towers for scalars and first derivatives for vector spaces, but not how to do derivative towers for vector spaces.  Since a first derivative is a linear map, a second derivative is a linear map whose range type is linear maps, which is isomorphic to a //bi//-linear map.  If I had maps more generally than linear, then I could represent a linear map to linear maps as a bilinear map, thus reducing the range type.
*  Do I really need a special representation of linear (or multi-linear) maps?  Why not simply represent via a function?  I already have vector space and inner product space instances for functions.  Seems to work out fine, so far.
*  Make an @Arrow@ instance for linear maps.   (@Category@ really, since there's no @arr@.)  Note ++++
\begin{code}
first :: (a :--> b) -> ((a,c) :--> (b,c))
\end{code}
*  The chain rule I'm using, @(>-<)@, now uses scalar multiplication, and so is limited to scalar functions.  To generalize, replace multiplication by //composition// of linear maps.  Currently, I have a funny asymmetry between function and argument.  The arguments are @a:>b@ and the functions are @a:>b -> a:>b@.  Regularize by using a type of infinitely differentiable functions
\begin{code}
newtype a :~> b = DFun (a -> a:>b)
\end{code}
=== Define composition using the chain rule.  See how linear map composition falls out.  Do it first for single derivatives and then for towers.
===
* Puzzling over how to combine (a) derivatives as linear maps (thus handling derivatives of non-scalar functions) with (b) infinite derivative towers.  I have each by itself, but not yet the combination. ++++
*  Look at the scalar chain rule: ++++
\begin{code}
(>-<) :: (Num a) => (a -> a) -> (Dif a -> Dif a) -> (Dif a -> Dif a)
f >-< d = \ p@(D u u') -> D (f u) (d p * u')
\end{code}
=== What makes the tower version work is that multiplication is defined on @Dif@, not just values.
*  Here's a version for derivatives as linear map, but only a single derivative: ++++
\begin{code}
(>:<) :: (u -> v) -> (u -> u:-->v) -> t:>u -> t:>v
f >:< d = \ (D u u') -> D (f u) (d u <<< u')
\end{code}
=== Here @(<<<)@ is composition on linear maps, which is *not* defined on @Dif@.  Linear map composition generalizes multiplication if we think of the multiplicands as linear maps that scale.  That is, @scaleLM (u*v) = scaleLM u <<< scaleLM v@.
*  So I guess the missing piece is extending linear map composition to @Dif@ in the same way numeric functions are extended.
*  The derivative rule for multiplication: ++++
\begin{code}
p@(D x x') * q@(D y y') = D (x * y) (x' * q + p * y')
\end{code}
=== Generalize to composition of linear maps (notes on paper, somewhat handwavy): ++++
\begin{code}
p@(D u u') <<< q@(D v v') = D (u <<< v) ((u' <<< q) + (p <<< v'))
\end{code}
=== Not quite.  Types don't work out.
===
* [[Statically Typed Linear Algebra in Haskell| http://ofb.net/~frederik/stla/]]
* Wrote & turned in ICFP reviews
* Oh!  I just realized that my parametric surfaces already work with derivative towers.  I don't have to adapt them, because I kept the types generic.  For instance, ++++
\begin{code}
torus :: (Floating s, VectorSpace s s) => (s,s) -> Surf s
\end{code}
=== where @s@ is the coordinate type.
* Cute idiom for pairs of consecutive list elements: @zip <*> tail@.  Picked up from EvilTerran on #haskell.
* [[AlphaGrip Keyboard & Trackball| http://reddit.com/goto?id=6hvoo]].
* [[Comment on a blog post| http://reddit.com/info/6iv5u/comments/]] ++++>
> pure object orientated languages allow you to model the world the way it is.
The world, in our experience, is temporally continuous, while (imperative) OO languages impose temporal discreteness. If you want to eliminate this functional mismatch between programming paradigm and the world, I think you'll be forced toward a math-style paradigm like functional programming (and particularly something like functional reactive programming).

If you want a programming paradigm that reflects a quantum physics reality, then I imagine you'd have to get mathier yet.
=== Also, ++++>
> Ron's summary is that Haskell forces you to think the way it thinks (mathematically) rather than allowing you to express things the way you think.

In addition, Haskell frees you from thinking the way imperative programming languages have already forced programmers to think. Imperative languages were designed to reflect the original computers. The result is that not only do people program machines, but in the process machines (via languages that model them) reprogram people. The most difficult part of declarative programming paradigms may be the required deprogramming.

To be more precise, I don't think it's Haskell we're talking about here, but rather (pure) functional programming. Or the pure subset of Haskell. Haskell also supports imperative programming, via IO, STM, ST, etc.
===
* Wow: [[Perceptive Pixels multi-touch screen demo| http://reddit.com/goto?rss=true&id=t3_6iqpf]].
* I want to explain what the differentiation stuff means (see [[Beautiful differentiation| http://conal.net/blog/posts/beautiful-differentiation/]]).  What are these functions, and in what sense are we using the chain rule?  I think the idea is that a value of type @a :> b@ is a structure containing, for some @f :: a -> b@ and @x :: a@, the values @f x@, @f' x@, @f'' x@, ....  The funny thing is that in typical AD, one doesn't explicitly pass around these functions.  Explain why not to compose a @b::>c@ with an @a -> b@, namely that the result is not a derivative tower.
* What about temporal derivatives.  What to do about discrete types (and non-vector-space types)?
* Got parametric surfaces rendering.  Very broken: ++++
*  Slow
*  The sphere is a (bi-)cone
*  Half of the cone is a dull gray
*  On the other half, the curvature seems to be concave where I'd expect convex.
===
* I've been tinkering quite a lot with how to formulate parametric surfaces.  I just stumbled on a very simple solution.  Start with surfaces defined as follows: ++++
\begin{code}
type Two   s = (s,s)
type Three s = (s,s,s)

type HeightField s = Two s -> s
type Surf        s = Two s -> Three s

type Curve2 s = s -> Two s
type Curve3 s = s -> Three s

type Warp1 s = s -> s
type Warp2 s = Two   s -> Two   s
type Warp3 s = Three s -> Three s
\end{code}
=== A typical definition might be  ++++
\begin{code}
sphere :: Floating s => Surf s
sphere (u,v) = (cos theta * sin phi, sin theta * sin phi, cos phi)
where theta = 2 * pi * u
phi   = pi * v
\end{code}
=== Though I'd really refactor @sphere@ more elegantly.  For instance, ++++
\begin{code}
-- Profile product
profile :: Num s => Curve2 s -> Curve2 s -> Surf s
profile curve prof (u,v) = (cx*px, cy*px, py)
where
(cx, cy) = curve u
(px, py) = prof  v
\end{code}
=== Then  ++++
\begin{code}
sphere2 = profile circle semiCircle
sphere3 = profile semiCircle circle
\end{code}
=== Here's the neat thing about all these definitions: they're generic over coordinate type, as long as it's in @Floating@.  In particular, let @s = Two t :> t@. ++++
\begin{code}
type USurf = forall s. Floating s => Surf s

type VSurf = forall t. (VectorSpace t t, Floating t) => (Vector2 t ::> Vector3 t)

fToSurf :: USurf -> VSurf
fToSurf f = vec3 . f . split2 . dId
\end{code}
=== where ++++
\begin{code}
type a ::> b = a -> a:>b

dId    :: VectorSpace v s => v ::> v
vec3   :: (a:>s, a:>s, a:>s) -> a :> Vector3 s
split2 :: a :> Vector2 s -> (a:>s, a:>s)
\end{code}
=== I don't think I even have to define these universals.  Define my surfaces, and if the types are type-flexible enough, they'll work with the types I need.
* This new simple approach works!!  Awfully slow interpreted, but not too shabby when compiled -O2.  And I'm tessellating on the fly.  It'll be //much// faster if I cache tessellations.  Currently I represent shapes as a function from an error bound to a rendering action.  The OpenGL primitives data are in the action.  I'll want to replace that function with a data structure, e.g., an @IntMap@ (Patricia tree.)  I think I'll want to take the log of the bound, scale and round the result, and use the resulting integer as an index.  I can encapsulate this discretizing memoizer in a function ++++
\begin{code}
logMemo :: RealFrac s => s -> (s->a) -> (s->a)
\end{code}
===
* FieldTrip: ++++
*  Parametric surfaces now memoize (multiple) tessellations via @logMemo@.  Works great!
*   Do I want one type for conventional 2D geometry and another type for procedural imagery?  The latter is like Pan, using the same math substrate as parametric surfaces and is probably what we want for textures.
*   Solid textures: define as @R3->Color@ and compose with a surface @R2->R3@ to get a surface texture.
===
* Thoughts on more intelligent adaptive tessellation: ++++
*  I have the derivative machinery.  Throw in interval analysis as well, and combine the two to compute derivative bounds.  If I support second derivatives, then I could tessellate based on the degree of non-linearity, which I think is the measure that matters (since tessellation is a piecewise-linear approximation).
*  I currently tessellate with a regular grid, n-by-m.  Often not a good choice.  Consider a sphere.  Near the poles, I'd like fewer longitude divisions and more latitude divisions.  I think it'd be fairly easy to create a triangle strip based on differing numbers of points on the two sides, instead of matching numbers as I assume now.
*  Imagine adaptive tessellation/sampling for images as well, based on derivative bounds on images as a function from space to color.  Where color changes slowly, use fewer samples.  Where rapidly, use more.  Take care with discontinuities.  I wonder if there are useful connections with FRP, which also combines discrete and continuous change (though in time instead of space).
===
* The repo got into a strange state, in which I couldn't push my changes.  Andy eventually got it sorted out.
* Started a blog post "Beautifully generalized differentiation" about derivatives as linear maps (calculus on manifolds).  (Look for another title.)
* A bunch of tweaks: ++++
*  Moved some files & folders around to make the structure more regular.  Now examples, FieldTrip, and FieldTrip-GLUT are all siblings and all have src folders.
*  I got pretty lost in Glut.hs, and so did some simplifying (I hope!) changes.  I hope you like the result so far.  In the process, I switched from behaviors to reactive values, while I was getting a handle on things.  I can switch back.  For a notion of "time", I made a stepper in the GlutEvents type (now called GlutEnv, since it's not just events).
*  This new version of Glut.hs is really data-driven, as in section 8 of the Simply paper.  It only triggers a rendering if the scene changes.  For time-dependent scenes, the idle event will trigger an update, as well input events.  For non-time-dependent scenes, there ought to be only negligible cpu use when the user is not interacting.  That's a main goal of this FRP implementation architecture, but we'll have to see if it's really getting there.
*  If changes come in faster than they can be handled, they'll get dropped rather than piled up.
*  Oh -- I just realized that I didn't execute the action immediately but just queue it for the next idle event.  For that reason, there will be more latency than is necessary.  I'll have to think about how to eliminate this latency while keeping (a) thread-safety and (b) the no-pile-up property.
*  Data.Integral is in FieldTrip-GLUT, but not yet tested.  It's pretty simple, though.
*  I requested a new project "VectorSpace" on code/community.haskell.org.  When it's working, I'll make a new repo there and place a dependency on it in simply-reactive in FieldTrip.
===
===
* Chatting with paolino about making cabal dependency analysis much simpler.  See chat record.  The general problem that interests me is how to describe time-varying *relationships* between time-varying values.  Is FRP already sufficient?  Consider that @Reactive@ is a monad and so has a @join@, which turned reactive reactive values into reactive values.  Besides make/cabal, another example (I think) of the general problem is a game (simulation) in which objects move around and react to what's in their vicinity.  For instance, the fifteen puzzle (see [[2008-04-13]], [[2008-04-14]], and [[2008-04-15]]).
* To think about today: ++++
*  Textures
===
* //Beautiful differentiation// blog posts ++++
*  I have one post maybe ready to go, called "What is a derivative, really?  Beautiful differentiation, part 2".  Through examples, the post leads up to the notion that derivative values are linear maps.  Before publishing, I'd like a clearer sense of where I'm going.
*  Then retrace the development of differentiation code, using linear maps.  I get stuck past first derivatives.  Explain the goal and unanswered questions.  Invite help.  My current work-around is to maintain only first derivatives.
===
* Oh!  I want to put the linear map module into the VectorSpace package.  (Waiting for my project to appear on code.haskell.org.)
* Textures: ++++
*  An image is defined as a function @(R,R) -> Color@.
*  Applying an image to a parametric surface simply means matching up 3D position and colors, point-wise.  Given @surf :: Surface@ and @im :: Image@, the semantics of texturing (image application) is just @liftA2 (,) surf im@.
*  This simple texturing model isn't nicely composable, because the result of texturing has a different type from its arguments.  Instead, say that all surfaces have colors (and probably other surface properties) at its points.  Texturing then //combines// a given image with whatever is already there.  The most general interface I can think of is to apply a "surface" full of functions to a surface full of arguments.  The semantics of this kind of application is simply @(<*>)@ on functions.  Hm.  Not a "surface", but an "image" in the sense of Pan, i.e., completely polymorphic in the range type.  Maybe I want enough common structure in the range type so that the result can be manipulated spatially and perhaps chromatically, while still having some type freedom.
*  What about the implementation?  Simplest would be to sample the colors and the points synchronously in parameter space, placing a color with each point.  No texture map.  Great solution for simplicity, but probably not so great for quality & performance.  Typically, I think, textures get sampled more often than vertices.  Moreover, there's pixel hardware that runs (I think) in parallel with the vertex hardware, so we'd want to keep them both going.  However, give the simple thing a try first.
*  Suppose I could tessellate adaptively based on both spatial and chromatic variation, say using derivatives and interval analysis.  Then I could express even a constant color as an image, and rely on the bounds analysis to generate only a single sample.
*  Remember: the hardware does interpolation, so I really want to bound the error of the interpolation.  Requires second derivatives at least (higher for nonlinear interpolation).  I want to figure out how to get higher derivatives with linear maps.
*  Interval bounds also help a lot with view culling.  And normal cone culling.
*  I tried one color per vertex.  Looks pretty bad.
*  Tomorrow, go back to my Piq project and pull out some code for texture-mapping.  We'll have to revisit the problem of prompt finalization to recycle texture chunks.
===
* Andy asked last night: "What does a surface //mean//?".  I answered: a function in @R2->R3@ (restricted to within the unit square domain), but I'm not happy with that answer.  The parametric representation is just that: a //representation//.  For instance, I'd like reparameterization to affect only the representation and not the semantic model.  Also, the implicitness of the unit domain precludes parameter space trimming and genus>0 surfaces.  (Though one could texture with some transparent regions.)  I want the model to express //geometric// rather than //parametric// properties.  For instance, the parametric surface derivatives and their cross-product are purely parametric properties, while the normal (normalized cross product) is also a geometric property.
* FieldTrip texturing: ++++
*  Try a simple approach first.  Make a mipmap for each texture.
*  What max resolution to use?
*  Do we want to anti-alias the highest resolution version?  Perhaps so, using the error bound.
*  Progressive anti-aliasing?
*  There may be some weird and possibly useful feedback between progressive anti-aliasing and frame rate.  Static images can have cached samplings.  At first a sampling takes a while to compute, which may reduce the frame rate.  Then the frame rate could get increased and find that rendering is faster, so the frame rate stabilizes.  We probably want some hysteresis.
===
* I'd like to avoid conditionals, for better analysis.  I could use signum, e.g., @(1 + signum x)/2@.  Or a simpler primitive that has known derivatives.  Sticking with numeric operations instead of boolean may keep my image code more polymorphic and hence suitable for derivatives.
* Parametric code and the GPU.  Use the parametricity of my surface code to generate *symbolic* versions, which can then be turned into GPU code.  Deal with CSE as in Pajama or some other method.
* Dirac impulses and interval analysis.  Identify derivative discontinuities to focus sampling there.  If the derivative of signum (for instance) is said to be zero everywhere (e.g., as in my [[recent post| http://conal.net/blog/posts/beautiful-differentiation/]]) then derivative bound analysis will report very few samples are necessary.  Even a crude approximation to impulses would help.  Make the derivatives be extremely large at zero.
* Use an quadtree representation of general images (including surfaces), together with derivative bounds (derivatives plus interval analysis).  Second derivatives are important!!  I have to get higher derivatives in linear-map derivative towers.
* Revisiting derivatives and the product rule.  Start with just first derivatives.  Dropping the distinction between linear maps and functions: ++++
\begin{code}
D s s' * D t t' = D (s*t) (\ d -> s *^ t' d ^+^ s' d *^ t)
= D (s*t) ((s *) . t' + (* t) . s')
\end{code}
=== Note how the derivative term corresponds to the usual @r*s' + r'*s@, when derivatives are punned with a scale factor.  The derivative part of this definition says that a tiny change @d@ is transformed by @t'@ and then further magnified by @s@ //and// also is transformed by @s'@ and then further magnified by @t@.  For the @VectorSpace@ class, scalar multiplication on derivative towers has a similar definition: ++++
\begin{code}
D s s' *^ D u u' = D (s*^u) ((s *^) . u' ^+^ (*^ u) . s')
\end{code}
===
* Restoring the distinction between linear maps and functions: ++++
\begin{code}
D s s' *^ D u u'  = D (s*^u) ((scaleLM s <<< u') ^+^ (scaleLM' u <<< s'))
\end{code}
=== Note that @(scaleLM s <<< u') == s *^ u'@, since linear maps are also vector spaces.  There isn't a similar short-hand for the second summand.
* Now what about higher derivatives?  I think I know how to handle everything except the compositions.
* I wonder: If I flip my clock over to face the wall, are the hands still turning clockwise?
* I think I now understand what Taylor series have to do with the linear map perspective on derivatives (calculus on manifolds). ++++
*  Take a conventionally written Taylor series ++++
\begin{code}
f (x+d) = f x + d * D f x + 1/2 * d^2 * D (D f) x + ...
\end{code}
=== This form assumes that a derivative value (e.g., @D f x@) is a number and @d@ is a (small) number, so multiplying makes sense and the types all work out.
*  Now change our notion of differentiation: ++++
\begin{code}
D :: (a -> b) -> (a -> (a :-* b))
\end{code}
=== where @a :-* b@ means linear map from @a@ to @b@.   The multiplications in the usual Taylor series form no longer make sense.  However, recall that multiplying by a given value is a linear transformation.  So replace @d * D f x@ with @D f x d@.  What about the second-order term with the @d^2@?  Replace @d^2 * D (D f) x@ with @D (D f) x d d@.  Then, ++++
\begin{code}
f (x+d) = f x + D f x d + 1/2! * D (D f) x d d + ...
\end{code}
===
*  Now note that there is no assumption that @x@ and @d@ are scalar.  Rewrite again, using @VectorSpace@ operators: ++++
\begin{code}
f (x ^+^ d) = f x ^+^ D f x d ^+^ 1/2! *^ D (D f) x d d + ...
\end{code}
===
*  Is anything more needed for the "multi-variate" (higher-dimensional domain) case?  See the [[multi-variate Taylor series Wikipedia article| http://en.wikipedia.org/wiki/Taylor_series#Taylor_series_in_several_variables]], which looks fairly different.
===
* I think I also understand how to take derivatives of lifted linear map compositions. ++++
*  First look at the function setting ++++
\begin{code}
m :: d -> (a :-* b)
n :: d -> (b :-* c)

p :: d -> (a :-* c)
p = liftA2 (.) n m
\end{code}
=== where @(.)@ is composition on linear maps.  (I'm using Arrow notation for convenience, but look for something prettier.)
*  What's the derivative of @p@?  Take the Taylor series forms of @m@ and @n@, form their linear composition.  Use the linearity of @(.)@ on each argument to expand out the composition and isolate the linear part.  The result linear transformation ++++
\begin{code}
D p x = \ d -> D m x d . n x ^+^ m x . D n x d
\end{code}
===
*  Write the linear transformation in point-free form: ++++
\begin{code}
D p x = (. n x) . D m x ^+^ (m x .) . D n x
\end{code}
=== Subtle point: I'm using a function-lifted version of (^+^) in the second form.
*  Hm.  Not a great improvement and certainly more obscure.  However, it does suggest a formulation that's suitable if linear transformations //are not// represented as functions: wrap up pre-composition and post-composition as ways to make linear transformations. ++++
\begin{code}
D p x = (postComp n x <<< D m x) ^+^ (preComp m x <<< D n x)
\end{code}
===
*  Now back to AD.  Instead of keeping a function and its derivative function separate, AD combines them into a single function with a structured result.  Then instead of writing function compositions, the AD operations are programmed on this result type, using overloading as much possible, so that AD expressions look just like non-AD ones.  Function arguments like @x@ above disappear.  I think we'd get something like the following: ++++
\begin{code}
D n n' <<< D m m' = D (n <<< m) ((postComp n <<< D m) ^+^ (preComp m <<< D n))
\end{code}
=== I'm using (<<<) at three different types here!
*  If linear transformations are represented as functions, then ++++
\begin{code}
D n n' <<< D m m' = D (n . m) ((n <<< D m) ^+^ (m <<< D n))
\end{code}
===
*  Note the similarity to the old product rule (with derivative values being represented as regular values): ++++
\begin{code}
D u u' * D v v' = D (u * v) (v *^ u' ^+^ u *^ v')
\end{code}
===
===
*  chessguy's [[ChessLibrary project| http://code.haskell.org/ChessLibrary/README.txt]].  I shared a lot of how my creative process works.  I liked geezusfreeek's contributions.
*  web-iswim
*  More chat with Peaker, though I'm tiring of it.  I don't think I've inspired him with the goal that motivates me to want a non-syntactic solution to revising denotations.
===
* Chatted with paolino again (see transcript) and then with Saizan on #haskell, about frp solutions to compilation management (make/cabal).  My conclusion: they have very short-term goals, and I want to start from scratch, building a new semantic foundation.  Working together wouldn't help either effort.
* Realization: it's not possible to say what one means, because meanings and words are different kinds of things.
* Worked more with chessguy in #haskell (chat saved), on his algebraic type for tree zippers.  It [[got quite lovely| http://hpaste.org/7731]] in the end.  Something came up that I want to ponder on more.  chessguy wanted an @Arbitrary@ instance, and his semantic model (zipper endomorphisms) already has an Arbitrary instance, so could he use it?  No.  Because it contains semantic //junk//, not corresponding to the meaning of any traversal.  In other words, the model is not [[universal| http://www.inf.ed.ac.uk/teaching/courses/fpls/note15.pdf]].  My question: is there a simple universal model?  If so, could it have an @Arbitrary@ instance?
* Some repo shuffling: ++++
*  New repo [[vector-space| http://darcs.haskell.org/vector-space]], with vector & affine spaces and generalized derivative towers.
*  Released it [[on hackage| http://hackage.haskell.org/cgi-bin/hackage-scripts/package/vector-space]].
*  Make simply-reactive depend on vector-space and move Integral there from FieldTrip-GLUT.
*  Push all three repos: vector-space (0.0), simply-reactive (0.6), f3d
===
* Duncan C set up the {{{vector-space}}} directory for me on http://code.haskell.org.  So I moved my repository there.
* Posted [[What is a derivative, really? Beautiful differentiation, part 2| http://conal.net/blog/posts/what-is-a-derivative-really/]] and started a [[reddit| http://reddit.com/info/6jw8w/comments/]].  Started part 3, which is about derivatives in my new library.  Part 4 will show how to use these derivatives for generating normals of parametric surfaces.
* Set up a [[ClustrMaps| http://clustrmaps.com]] account for my blog.  Pasted the code into my theme.  It updates my stats daily and shows a world map with my readers' locations.
* FieldTrip: ++++
*  I came up with a coding trick, and I'm torn about using it.  @HOpenGL@ has a @Vertex@ class.  I added instances for normals and colors, and for pairs.  Then surfaces over any nested pairing of such things can be rendered.  Moreover, one can do interpolation and differentiation generically, if the components support those operations.  However, it's not really safe, given the order-sensitivity of processing of these components, and specifically that the real vertex must come last.  Perhaps provide a type of vertex-with-adornment, and give it a @VectorSpace@ instance.  I could play the tuple game on the adornments.  Even represent the vertex-with-adornment as a newtype around a pair, deriving the @VectorSpace@ instance.
===
* Derivatives: ++++
*  Oops!  I goofed in putting my derivative implementation back together.  I wondered why I didn't end up needing to differentiate *composition* of linear maps, explored on [[2008-05-17]].  The reason is that I fooled myself with the type of my chain rule, which used only the first derivative, which I then composed with all higher derivatives.  Luckily I caught my mistake when I tried to write down the chain rule directly.
*  For higher derivatives, I'm using curried linear functions, which correspond to multilinear uncurried functions.  What about more direct use of multilinear functions?  Hm!  Represent a derivative tower as a list of multilinear functions.  Even the constant term (//0^^th^^// derivative) is a "nullinear" (constant) function.  In general, the //n^^th^^// element in the tower is a multilinear map of degree //n//.  The question of composition may now become simpler.
*  There must be a close relationship between infinite AD and Taylor series manipulation.  What exactly is that relationship?  Maybe they differ only in whether the reciprocal factorial constant factors are present or implicit.  If left implicit, as in AD, how do they get manipulated correctly in operations other than sum?
*  Wow.  I think Jerzy's implementation of multiplication is exponentially inefficient as the derivative degree grows.  Note sum and product:  ++++
\begin{code}
D x0 x'     + D y0 y'     = D (x0 + y0) (x' + y')
x@(D x0 x') * y@(D y0 y') = D (x0 * y0) (x' * y + x * y')
\end{code}
=== So ++++
\begin{code}
x * y == x0*y0 D (x'*y + x*y')
== xy0 D (x1*y0 + x0*y1) D (x''*y + x'*y' + x'*y' + x*y'')
== xy0 D xy1 D (x2*y0 + x1*y1 + x1*y1 + x0*y2) D ...
== ...
\end{code}
=== Already we start to see repetition: the @x'*y'@ term appears twice, leading to computing @x1*y1@ twice and doubled work in unrolling the two @x'*y'@.  The //n^^th^^// derivative value will have //2^^n^^// product terms, though only //n+1// distinct.
*  Since multiplication is at the heart of the chain rule, //everything// inherits this efficiency problem.
*  One solution may be switching to Taylor series.  Probably augmented with a means of speeding up the required factorial tweaks for extracting derivatives.
* Here's a note on the Haskell list, which I used in my google tech talk on tangible functional programming: +++>
[Haskell] Power series in a nutshell
Doug McIlroy
Thu, 12 Jul 2007 09:50:01 -0700

For lovers of things small and beautiful, http://www.cs.dartmouth.edu/~doug/powser.html boils down basic operations on power series with numeric coefficients to the bare minimum--each is a one-liner. Included are overloaded arithmetic operators, integration, differentiation, functional composition, functional inverse and coercion from scalars. --A telling demonstration of the power of lazy evaluation and of Haskell's attunement to math.

Doug McIlroy
===
*  Another solution may involve optimizing this one multiplication step.
*  Watch out for the chain rule, which also uses both a whole and a part
*  Consider the generalized chain rule, yet again: ++++
\begin{code}
D (f . g) x = D f (g x) . D g x
\end{code}
=== where the @(.)@ on the right composes //linear// functions.  I know how to deal with bilinear functions like @(*)@, @(*^)@, and @(<.>)@, so can I reduce the chain rule's RHS to a bilinear composition?  I want to see it as a @liftA2@ of a bilinear (really curried linear) function. ++++
\begin{code}
D (f . g) = liftA2 (.) (D f . g) (D g)
\end{code}
=== Recall that linear map composition is bilinear.  If I can handle the @(D f . g)@, then I'm done.  Hm.  In a sense, @(D f . g)@ is exactly what we do have, sampled at @x@.  However, the higher derivatives are all wrong, since they're all derivatives of @f@ at @g x@, not of @D f . g@ at @x@.  These derivatives map changes in @g x@, not in @x@.  On the other hand, the chain rule tells us how these two derivatives relate, given @D g x@, which we also know.
*  I got stuck with this representation: ++++
\begin{code}
data a :> b = D b (a :> (a :-* b))
\end{code}
=== and tried the following instead, which I'd accidentally written once before: ++++
\begin{code}
data a :> b = D b (a :-* (a :> b))
\end{code}
=== This data type is not nested.  Note the similarity with the type of infinitely differentiable functions: ++++
\begin{code}
type a :~> b = a -> (a:>b)
\end{code}
=== With the restriction that the derivative is a linear map, we could say ++++
\begin{code}
data a :> b = D b (a :~> b)
\end{code}
===
===
* Why static typing matters: a correct type includes all correct implementations and excludes some incorrect implementation.  In some cases, very few incorrect implementations have the required type.  Types therefore serve as a machine-checkable partial specification, i.e., necessary condition for correctness.  //Statically// checkable types therefore go part of the way toward machine-checked correctness.  The essential point here isn't types at all, it's automated partial specification checking.
* Came up with a {{{cabal-make}}} work-around for the problem of cabal making documentation directories that I'm not allowed to write.  See the {{{install}}} target in {{{cabal-make.inc}}}.
* Added {{{cabal-make}}} target {{{release}}}, which bundles up the steps in the [[project release check-list]].  Easy!! ++++
\begin{code}
release: clean doc install release-record tag push test-get-build web-doc-see darcs-dist upload wiki
\end{code}
===
* Published my third differentiation blog post: [[Higher-dimensional, higher-order derivatives, functionally| http://conal.net/blog/posts/higher-dimensional-higher-order-derivatives-functionally/]]
* What to do next?  Possibilities: ++++
*  Shift thinking to back to authoring tools
*  Read and noodle over chessguy's Chessboard module
*  Make the derivative stuff more visible.  Hook up to an expression type.
*  Write post about surface modeling
*  FieldTrip: ++++
*   More surface tools, including @displace@, from Vertigo
*   Can we have a simple way to render an image, independent of FRP and GLUT?
*   Design texture mapping.
*   Quad-tree representation of surfaces and images to complement the parametric representation.
*   Solid textures
*   Tubes, say via frenet frames.  Try first without arc-length re-parameterization.
*   Explore uses of higher derivatives.
===
*  Lennart A pointed me to his [["numbers" package| http://hackage.haskell.org/cgi-bin/hackage-scripts/package/numbers]], which includes symbolic expressions, derivative towers (scalar domains), a bit of interval arithmetic, constructive (exact) reals, and a few others types.  I want to use the expressions to examine my derivative stuff.  Gave it a try and ran into a problem immediately.  His representation assumes all arguments have the same type, which doesn't work for, say, scalar multiplication.
*  Started on a more flexible type of typed expressions.
===
* Got my reviews for "Simply efficient functional reactivity".  Mixed.
* Chatting with Andy, I got an idea: hook up my simple graph specification idea ([[2008-04-25]]) to generate {{{dot}}} format.
* Got some interest on #haskell for "Functional Cabal" a functional successor/replacement to the current (very imperative) Cabal.  I mentioned I'm looking for research collaborators on the idea.  Got positives from thetallguy, geezusfreeek, dcoutts, edwardk.
* How does my "simply" paper relate to [[adaptive functional programming| http://www.cs.cmu.edu/~guyb/pubs.html]] (self-adjusting computation)? ++++
*  Simple denotational model.
*  Interface and implementation are purely functional (given the functional unamb operator).
*  Hindley-Milner typing suffices vs no modal typing.
*  More precise/efficient updating because of Applicative rather than continuation/monad implementation.  (See "[[Monads for Incremental Computing| http://citeseer.ist.psu.edu/carlsson02monads.html]]".)
*  Simple static safety vs "correct usage" requirement.
===
* The AFP papers use the example of "adaptive Quicksort", which updates a sorted list in log time, in response to an added element.  Try the same example in my implementation.  Experimenting in {{{~/Haskell/Quicksort.hs}}} ++++
*  For insertion, they define a list type with static heads but dynamic tails.  In a FRP formulation, I'd have a reactive cons whose head is constant and whose tail is initially empty and later a new such reactive cons.  Use a reactive if-then-else.
*  The standard non-adaptive algorithm: ++++
\begin{code}
qsort1 :: Ord a => [a] -> [a]
qsort1 []     = []
qsort1 (p:xs) = qsort1 (filter (< p) xs) ++ [p] ++ qsort1 (filter (>= p) xs)
\end{code}
=== The AFP papers use an accumulating parameter to eliminate the appends.  In Haskell, ++++
\begin{code}
qsort2 :: Ord a => [a] -> [a]
qsort2 l = q l []
where
q []     = id
q (p:xs) = q (filter (< p) xs) . (p :) . q (filter (>= p) xs)
\end{code}
=== We'll also want a @filter@ for making adaptive: ++++
\begin{code}
filt :: (a -> Bool) -> [a] -> [a]
filt _ []                 = []
filt f (x:xs') | f x       = x : filt f xs'
| otherwise = filt f xs'
\end{code}
=== I could factor out the recursive call, but the AFP papers didn't, and I won't bother.  (See sample code, though.)
*  Next a non-incremental FRP version.  First, for fun, turn an element-generating event into a sorter: ++++
\begin{code}
gather1 :: Event a -> Event [a]
gather1 e = [] `accumE` (fmap (:) e)
\end{code}
=== Then sort incoming elements: ++++
\begin{code}
qr1 :: Ord a => Event a -> Event [a]
qr1 = fmap qsort2 . gather1
\end{code}
=== Or via reactive values: ++++
\begin{code}
gather2 :: Event a -> Reactive [a]
gather2 e = [] `accumR` (fmap (:) e)

qr2 :: Ord a => Event a -> Reactive [a]
qr2 = fmap qsort2 . gather2
\end{code}
===
*  Of course, for incrementality (adaptivity), we won't want to use @fmap@.  Instead, we'd rewrite @qsort2@ on reactive lists.  For instance, ++++
\begin{code}
ifA :: Applicative h => h Bool -> h a -> h a -> h a
ifA = liftA3 (\ c t e -> if c then t else e)

filt4 :: Applicative h => (a -> Bool) -> h [a] -> h [a]
filt4 f l =
ifA (null <$> l) (pure [])$
let (x,xs) = unCons l in
ifA (f <$> x) (liftA2 (:) x (filt4 f xs)) (filt4 f xs) \end{code} === Factoring out the recursive call, ++++ \begin{code} filt4 f l = ifA (null <$> l) (pure []) $let (x,xs) = unCons l in (ifA (f <$> x) ((:) <$> x) (pure id)) <*> filt4 f xs \end{code} === * It might be important to handle propagation specially for @ifA@. * Now try a special representation that allows only changeable tails, as in the AFP papers. ++++ \begin{code} data ListF' f a = Nil | Cons a (ListF f a) type ListF f a = f (ListF' f a) \end{code} === * Here's a monadic formulation: ++++ \begin{code} filt5 :: Monad f => (a -> Bool) -> ListF f a -> ListF f a filt5 f lf = do l <- lf case l of Nil -> return Nil Cons x xs -> let xs' = filt5 f xs in if f x then return (Cons x xs') else xs' \end{code} === I don't know how to do without @Monad@. Odd, considering that the more flexibly changeable version (@filt4@) needed only @Applicative@. * This version is similar to, but simpler than, the AFP version: ++++ {{{ fun filter' f l = let fun filt(l,d) = read(l, fn l' => case l' of NIL => write(d, NIL) | CONS(h,r) => if f(h) then write(d, CONS(h, modl(function d => filt(r,d)))) else filt(r,d)) in modl(function d => filt(l, d)) end }}} === * Compare also with Magnus Carlsson's version from "[[Monads for Incremental Computing| http://citeseer.ist.psu.edu/carlsson02monads.html]]": ++++ \begin{code} filter' :: (Eq a, NewMod m) => (a -> Bool) -> Mod (List' a) -> m (Mod (List' a)) filter' f l = newMod (filt l) where filt l = do l' <- readMod l case l' of NIL -> return NIL CONS h r -> if f h then CONS h liftM newMod (filt r) else filt r \end{code} === === * Note to Krasimir Angelov's request for feedback on the his tree zipper interface. +++> I had a long exchange with chessguy about this interface, suggesting a significant change in style, simplifying the type. (Incidentally, the change removed the State and hence mtl dependence.) 
The conversation is on http://tunes.org/~nef/logs/haskell/08.05.17, starting with "12:08:11 <chessguy> w00t!" and really picking up with "<conal> chessguy: something smells funny ...". Here's a summary of the conversation, though I encourage you to read the whole thing: * Every definition of type @State (TreeLoc a) a@, does a @getLabel@ at the end (except @getLabel@ itself). * Often users of those movement functions discard the result. * Simpler and more orthogonal would be remove the @getLabel@ and return @State (TreeLoc a) ()@ instead. * Now remove that return value altogether, simplifying the type of zipper movements to just @TreeLoc a -> TreeLoc a@. Then they compose nicely with @(.)@, having @id@ as identity. * Simplify the type of @getLabel@ to just @TreeLoc a -> a@. Now no more @State@. === * Responded to reviewer comments on my ICFP @08 submission. * Found blog post //[[Bicycling for Collatz| http://swik.net/Haskell/Data.Syntaxfree/Bicycling+for+Collatz/bmt0z]]//, giving a very simple way to generate {{{.dot}}} format from Haskell values. Nice application to graphing functions on a finite domain. Copied code into {{{~/Haskell/Bike.hs}}} to play with. Installed [[graphviz| http://www.graphviz.org/]] and tried out the code. Fun & simple. * There's also [[a module in the inductive graph library| http://www.haskell.org/ghc/docs/latest/html/libraries/fgl/Data-Graph-Inductive-Graphviz.html]] that converts a graph to graphviz format. * Adding displacement mapping to FieldTrip. ++++ * I want a single definition to apply to both curves and surfaces. * The code: ++++ \begin{code} displaceV :: (InnerSpace v s, Floating s, HasNormal v) => v -> s -> v displaceV v s = v ^+^ s *^ normal v displace :: (InnerSpace v s, Floating s, HasNormal v, Applicative f) => f v -> f s -> f v displace = liftA2 displaceV \end{code} === The first version almost sufficed for functions (surfaces & curves), but scalar multiplication on functions is lifted only in the second argument. 
I'd had the other definition (@(*^) = liftA2 (*^)@), with functions as scalars, but it wasn't compatible with the meaning on linear maps. (I also lost dot products in the process.) * I moved cross products into the {{{vector-space}}} package, using a @HasNormal@ class: ++++ \begin{code} -- | Thing with a normal vector (not necessarily normalized). class HasNormal v where normalVec :: v -> v -- | Normalized normal vector. See also 'cross. normal :: (HasNormal v, InnerSpace v s, Floating s) => v -> v normal = normalized . normalVec \end{code} === Two instances: ++++ \begin{code} instance (Num s, VectorSpace s s) => HasNormal (s :> Vector2 s) where normalVec v = cross2 (deriv v 1) instance (Num s, VectorSpace s s) => HasNormal (Vector2 s :> Vector3 s) where normalVec v = v' xVector2 cross3 v' yVector2 where v' = dDeriv v \end{code} === I made similar instances for pairs and triples. Also, variants for types like @Three (Vector2 s :> s)@. * I'm having doubts about using the OpenGL types for vectors, instead of tuples. I use tuples in @ParamSurf@, but tuples in @Geometry3@, and I have to convert between them. Instead, we could just use tuples in FieldTrip, and convert to OpenGL types when rendering. For static shapes, the conversion would happen only once, while building the rendering actions. For dynamic shapes (more fun), perhaps ghc could remove at least some of the intermediate representations. One advantage of the OpenGL is that they're strict. I don't know if that strictness is helping us, compared with ghc's strictness analysis. * Somewhere I broke things and am getting an undefined @(==)@ test on @(a:>b)@. I don't know how to track down the source code that's calling @(==)@, because it's in a separate library (@vector-space@). Idea: tweak my {{{.ghci}}} file to add the other source directories to the source path. To debug, to "{{{:trace main}}}" in ghci. 
=== * FieldTrip performance: ++++ * Andy had an idea about performance of surface sampling, which is a problem now. Consider the torus. It's a single curve (circle) swept around an axis. Because we're sampling on a regular grid, that circle gets sampled over & over at the same sequence of inputs. One simple solution would be to represent surfaces in //curried// form. Then there'd be the possibility of doing some work based on the first parameter to be reused as the second parameter varies. Perhaps the general problem is that a surface (function) contains work that depends on one parameter and some work that depends on the other, and some that depends on both. * Still, we're getting a huge amount of computation in the * Tips on profiling, from Andy: ++++ * In the .cabal file, add {{{ghc-prof-options: -prof -auto-all}}} * Then use {{{runhaskell Setup.lhs configure --enable-library-profiling}}} === The library gets built with and without profiling. * === * Formulation of distance formula: @(fmap.fmap) sqrt ((+) on (^2))@. Or use @join (*)@ in place of @(^2)@. * Noodling over why displacement is so very slow. ++++ * Could it have to do with the representation of derivatives as (linear) functions? See saved conversation with Andy. He's getting 73 cross-product computations per sample point. One would be for the normal used in shading. what would the other be for? * Andy's example involves this function: ++++ \begin{code} fun :: Surf (Vector2 R :> R) fun v = 0 ^/ magnitude r1 where r1 = unvector3F (normalV (vector3F (tt v))) \end{code} === * When I drop @unvector3F@, my per-vertex count (of cross products) minus one drops from 72 to 24, i.e., one third. ++++ \begin{code} unvector3F :: (Functor f) => f (Vector3 s) -> Three (f s) unvector3F d = (vector3x <$> d, vector3y <$> d, vector3z <$> d)
\end{code}
=== Here, @f a = Vector2 R :> a@.

*  I suspect @distribD@ is a big part of the problem.  It generalizes the product rule that Jerzy used: ++++
\begin{code}
x@(D x0 x') * y@(D y0 y') = D (x0 * y0) (x' * y + x * y')
\end{code}
=== As on [[2008-05-19]], this formulation computes redundant products after the first derivative.  In //[[The Music of Streams| http://citeseer.ist.psu.edu/mcilroy00music.html]]// (section 4.1 called "Complexity"), Doug McIlroy says ++++>
The product operators for Maclaurin and Horner forms respectively take //O(2^^n^^)// and //O(n^^2^^)// coefficient-domain operations to compute //n// terms.  Evidently it would be advantageous to compute the product of Maclaurin-form series by bypassing (Melzak 1983) to Horner form and back.
=== The work done for the //n^^th^^// derivative of a product in the Horner form seems to be linear, in contrast to the exponential work for the Maclaurin form.
*  Instead of converting back & forth with Maclaurin form,  maybe a simpler solution is to work in the Horner form throughout, i.e., a straight power series.  I'm trying that alternative now.  It only changes a few things, but it does require distinguishing differentiation from accessing part of the representation, and integration from constructing the representation.
*  Now there are two power series modules: @Maclaurin@ (was @Derivative@) and @Horner@.  Now @Derivative@ is an indirection to one or the other, for testing.  See Doug McIlroy's paper "The Music of Streams".  However, I don't notice a speedup in the displacement examples.  Maybe it does have to do with my function representations and related loss of sharing.
*  Perhaps I could give a data representation of linear maps if I tracked bases of vector spaces.  Every linear map is fully determined by its action on a basis.  The basis doesn't have to be orthogonal or normal.  Just linearly independent.  I'd also have to be able to decompose arbitrary vectors in terms of the basis.
===
* Andy fixed our terrible performance problem by memoizing derivatives.  The current definition: ++++
\begin{code}
data a :> b = H b (a :-* (a :> b))
\end{code}
=== Since linear maps are represented as //functions//, this representation is begging for trouble.  Andy's fix: ++++
\begin{code}
data a :> b = D b (Memo a (a :> b))
\end{code}
=== where @Memo u v@ memoizes @u -> v@ in a finite map, where @a@ must be in @Ord@.  With surfaces, @a = R2@ and @b = R3@.  Andy also noticed that these memoized functions get two distinct arguments each, namely @(0,1)@ and @(1,0)@, i.e., the usual basis.  I use these vectors to extract the usual "partial derivatives", though I could use //any// two linearly independent vectors.  So, the question arises: could I represent these linear maps as a pair instead of a (linear) function?  Of course, this pair representation would have to fall out as a special case of a general scheme.
* Consider representing a power series as a sequence of //multilinear// maps (see [[2008-05-19]]).  The //n^^th^^// map is //n//-linear, if @f (c *^ v) == c^n * f v@.  Lemma: The product of an //m//-linear function with an //n//-linear function is //(m+n)//-linear.  This lemma leads to the usual product rule for power series.
* Another idea: Eliminate all but functions of a scalar value, by using currying.  Then use the simpler derivative towers, as in Jerzy's paper.  (Though still fix products to have linear instead of exponential complexity in the derivative degree.)  Then a surface would have type @s -> s -> (s,s,s)@ instead of @(s,s) -> (s,s,s)@.  This representation has the further benefit that it allows partial computation based on the first parameter.  This partial application would, I think, be especially useful for surfaces of revolution and other profile products.  I can think of the curried type as saying that a surface is an infinite family of curves.
* Correspondence on emergence-of-nvc about bottom-up certification.
* Thinking about non-function representations of linear maps.  I wonder why my earlier approach didn't work, in which I represented @(a,b) :-* c@ as @(a :-* c, b :-* c)@.  Try again.
* Cross product has two arguments in 3D but only one argument in 2D.  Think of it instead as an operation on @R2 :-* R3@ or @R :-* R2@, and then note that these two linear transformations can be represented as @(R3,R3)@ and @R2@, respectively.
* Here's a variation on Andy's use of @memo@: represent a linear map as a specially memoized function.  Because linear maps are so constrained, the map can be prepopulated to hold just values corresponding to a basis of the vector space.  To compose maps, just memoize the composition of functions.  The memoizer would rely on being given //linear// functions.
* Linear maps and derivatives: ++++
*  Yesterday I came up with a simple idea to represent linear maps efficiently, using a type family. ++++
\begin{code}
-- | Domain of a linear map.
class VectorSpace a s => LMapDom a s where
-- | Linear map type
data (:-*) a :: * -> *
-- | Linear map as function
($*) :: VectorSpace b s => (a :-* b) -> (a -> b) -- | Function (assumed linear) as linear map. linear :: (a -> b) -> (a :-* b) \end{code} === I tried something similar before, but I hadn't thought of @linear@, which essentially memoizes over a basis of the domain vector space. * Here are some instances: ++++ \begin{code} instance LMapDom Float Float where data Float :-* v = FloatL v ($*) (FloatL v)   = (*^ v)
linear f          = FloatL (f 1)

-- | Convenience function for 'linear' definitions.  Both functions are
-- assumed linear.
linearK :: (LMapDom a s) => (a -> b) -> (b -> c) -> a :-* c
linearK k f = linear (f . k)

instance (LMapDom a s, LMapDom b s) => LMapDom (a,b) s where
data (a,b) :-* o = PairL (a :-* o) (b :-* o)
PairL ao bo $* (a,b) = ao$* a ^+^ bo * b linear = liftA2 PairL (linearK (\ a -> (a,zeroV))) (linearK (\ b -> (zeroV,b))) \end{code} === * Lots of changes needed for derivative towers. Works out pretty well. * //But// there's a catch. What @VectorSpace@ instance do I use for derivative towers? It seems to be that I need one instance for compatibility with linear maps, and a different instance for use in parametric surfaces. I've been fiddling with several variations, and I'm stuck so far. * I tried re-wrapping @(a:>b)@ in another @newtype@ to allow the second @VectorSpace@ instance. Mostly worked, but I don't seem to be able to apply my derivatives to basis vectors. Try again tomorrow. If this one application is the only problem, I can probably make something special that gets to the @VectorSpace@ instance of the underlying representation. === * Linear maps & derivatives: ++++ * Simple idea: represent linear maps as functions but use a smart @(:>)@ constructor that wraps a special basis-memoizer. Hm. I'd still require the @LMapDom@ constraint. * I'm making great progress with the new derivative stuff. I really struggled over that instance conflict, even after re-wrapping @(:>)@ in a newtype. Now I think it's in good shape, but I ran into another problem, which appears to be a type-family implementation bug in ghc-6.8.2, preventing me from adding a @LMapDom@ instance for @Vector2@ and @Vector3@. Some options: ++++ * Upgrade to ghc-6.9, and cross my fingers (since I'm on Windows). However, I'd rather our library didn't depend on ghc-6.9. * Stop using the OpenGL types in our interface, as we discussed, and use tuples instead. A down side is that I have to rip up even more of the system before getting to another stable point. * Temporarily add @LMapDom@ instances for the OpenGL types to the vector-space package. Remove later. === Taking the third option. * I got everything working again. Phew! 
Trying the tests, I see a dramatic slow-down, in contrast to my intended speed-up. Hm! Ask Andy's help with profiling and examining the results. === * Andy found a dramatic speed-up for my linear map code, which was simply avoiding the @(<*>)@-like operation. He believes we're getting a huge amount of repeated computation due to use of functions, which don't cache. And particulary due to use of @(<*>)@ on functions. And that the non-sharing is hidden by implicit dictionary passing. I don't understand it yet. Now the examples that were fast are fast again, but displacement examples are still considerably slower. * I found that my @linear.lapply@ rule is not firing. I don't know why yet. /*** | Name:|AudioPlayerPlugin| | Author:|[[Conal Elliott|http://conal.net]]| | Version:|1.0.0, 2006-11-15| !Examples <<audioPlayer http://www.cnvc.org/downlds/20020510.mp3>> <<marshallism 20020329>> {{{ <<audioPlayer http://www.cnvc.org/downlds/20020510.mp3>> <<marshallism 20020329>> }}} !History * 1.0.0 (2006-11-15): first release !Code ***/ //{{{ version.extensions.AudioPlayerMacro = { major: 1, minor: 0, revision: 0, date: new Date(2006,11,15), source: "..." }; config.macros.audioPlayer = { player: function(url) { return "<html><iframe src=\"http://mail.google.com/mail/html/audio.swf?audioUrl="+url+"\" style=\"width: 250px; height: 25px; border: 1px solid #aaa;\" id=musicPlayer></iframe></html>"; }, play: function (url,place) { wikify(config.macros.audioPlayer.player(url),place); }, handler: function (place,macroName,params,wikifier,paramString,tiddler) { config.macros.audioPlayer.play(params[0],place); } } config.macros.marshallism = { handler: function (place,macroName,params,wikifier,paramString,tiddler) { config.macros.audioPlayer.play("http://www.cnvc.org/downlds/"+params[0]+".mp3",place); } } // Use with a slider. Doesn't work :(. The player doesn't materialize. 
config.macros.playerSlider = { play: function(label,url,place) { wikify("+++["+label+"]"+config.macros.audioPlayer.player(url)+"=== ",place); }, handler: function (place,macroName,params,wikifier,paramString,tiddler) { config.macros.playerSlider.play(params[0],params[0],place); } } config.macros.mbr= { handler: function (place,macroName,params,wikifier,paramString,tiddler) { config.macros.playerSlider.play(params[0],"http://www.cnvc.org/downlds/"+params[1]+".mp3",place); } } //}}} /*** |''Name:''|AutoTaggerPlugin| |''Source:''|http://www.TiddlyTools.com/#AutoTaggerPlugin| |''Author:''|Eric Shulman - ELS Design Studios| |''License:''|[[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]| |''~CoreVersion:''|2.0.10| Automatically tag tiddlers with their original creation date and author and optionally scan the tiddler content for any tags that are embedded as text. Makes cross-referencing your tiddlers a snap! !!!!!Usage <<< When ~AutoTagger is present, it automatically ''generates 'creation date' and 'creator' tag values'' for all newly created tiddlers, so that this information is retained even after a tiddler has been updated many times. In addition, if you enter ''//auto//'' as a tiddler tag value, ~AutoTagger ''scans the tiddler content'' (including title) for all existing tags, and ''automatically adds any embedded tags that it finds''. You can also specify a default tag (e.g. "untagged") that will be added to the tiddler if no other tags have been specified. This ensures that all tiddlers will have at least one tag associated with them. After the new tags have been added to the tiddler, they are treated just as if you had entered them by hand and can be edited to make any changes you want. Of course, as long as the "auto" tag is still present on a tiddler, ~AutoTagger will re-scan that tiddler's content each time it is edited. 
If you DO edit the generated tags, you can remove the "auto" tag from the tiddler to prevent it from being re-scanned when you press 'done' to finish editing. //Note: the special-purpose ''"systemConfig" tag is not added automatically, even if matched in the tiddler content'', since this tag should be added manually to ensure it is always used appropriately.// //Note: if you have set the "auto" tag on a tiddler, and then add several tags to your document, those tags will ''not'' be automatically added to the tiddler until you actually edit that tiddler and press 'done' to trigger an AutoTagger scan.// <<< !!!!!Configuration <<< The ~AutoTagger plugin comes with a ''self-contained control panel''. Use these controls to enable or disable automatic 'creation date' or 'creator' tagging, modify the default date formatting, or redefine the special 'scan trigger' tag value (so you can use "auto" as a normal tag value in your document). <<option chkAutoTagAuthor>> add 'created by' tag //(when a tiddler is first created)// <<option chkAutoTagDate>> add 'creation date' tag, using date format: <<option txtAutoTagFormat>> <<option chkAutoTagEditor>> add 'edited by' tag //(when a tiddler is updated)// <<option chkAutoTagTrigger>> scan tiddler content for matching tags when tagged with: <<option txtAutoTagTrigger>> <<option chkAutoTagDefault>> add default tag(s) to tiddlers that are not otherwise tagged: <<option txtAutoTagDefault>> ---- //date formatting syntax:// ^^//''DDD'' - day of week in full (eg, "Monday"), ''DD'' - day of month, ''0DD'' - adds leading zero//^^ ^^//''MMM'' - month in full (eg, "July"), ''MM'' - month number, ''0MM'' - adds leading zero//^^ ^^//''YYYY'' - full year, ''YY'' - two digit year//^^ <<< !!!!!Installation <<< import (or copy/paste) the following tiddlers into your document: ''AutoTaggerPlugin'' (tagged with <<tag systemConfig>>) <<< !!!!!Revision History <<< ''2007.01.20 [1.4.1]'' don't add create date tag to dated journal tiddlers (based on 
request from ConalElliott) ''2006.12.10 [1.4.0]'' added option to use default tag value when no tags are specified ''2006.08.29 [1.3.3]'' use newTags.contains() instead of newTags.find() to check for 'auto' tag ''2006.06.15 [1.3.2]'' hijack TiddlyWiki.prototype.saveTiddler instead of store.saveTiddler. Permits other plugins to also hijack the function (thanks to Simon Baird for finding this!) ''2006.05.31 [1.3.1]'' Re-assemble tags into a space-separated string (use encodeTiddlyLink to add {{{[[...]]}}} as needed) before passing it on to core (or other hijacked function) ''2005.10.09 [1.3.0]'' Added 'edited by' tagging. Combined documentation and code into a single tiddler ''2005.08.16 [1.2.0]'' Added optional scanning for tags in tiddler content (based on suggestion from Jacques Turbé) ''2005.08.15 [1.1.0]'' Added 'created by' tag generation (based on suggestion from Elise Springer). Renamed from DateTag to AutoTagger ''2005.08.15 [1.0.0]'' Initial Release <<< !!!!!Credits <<< This feature was developed by EricShulman from [[ELS Design Studios|http://www.elsdesign.com]]. 
<<< !!!!!Code ***/ //{{{ version.extensions.autoTagger = {major: 1, minor: 4, revision: 1, date: new Date(2007,1,20)}; if (config.options.chkAutoTagDate==undefined) config.options.chkAutoTagDate=false; if (config.options.chkAutoTagEditor==undefined) config.options.chkAutoTagEditor=false; if (config.options.chkAutoTagAuthor==undefined) config.options.chkAutoTagAuthor=false; if (config.options.chkAutoTagTrigger==undefined) config.options.chkAutoTagTrigger=false; if (config.options.txtAutoTagTrigger==undefined) config.options.txtAutoTagTrigger="auto"; if (config.options.chkAutoTagDefault==undefined) config.options.chkAutoTagDefault=false; if (config.options.txtAutoTagDefault==undefined) config.options.txtAutoTagDefault="untagged"; if (config.options.txtAutoTagFormat==undefined) config.options.txtAutoTagFormat="YYYY.0MM.0DD"; // hijack saveTiddler() TiddlyWiki.prototype.coreSaveTiddler=TiddlyWiki.prototype.saveTiddler; TiddlyWiki.prototype.saveTiddler=function(title,newTitle,newBody,modifier,modified,tags) { // get the tags as passed from the tiddler editor var newTags = []; if (tags) newTags = (typeof tags == "string") ? 
tags.readBracketedList() : tags; // if saving a new tiddler, add 'creation date' tag var now=new Date().formatString(config.options.txtAutoTagFormat); if (config.options.chkAutoTagDate && (store.getTiddler(title)==undefined)) if (newTitle!=now) newTags.pushUnique(now); // don't add create date tag to dated journal tiddlers // if saving a new tiddler, add 'created by' tag if (config.options.chkAutoTagAuthor && (store.getTiddler(title)==undefined)) newTags.pushUnique(config.options.txtUserName); // if saving an existing tiddler, add 'edited by' tag if (config.options.chkAutoTagEditor && (store.getTiddler(title))) newTags.pushUnique(config.options.txtUserName); // if tagged for scanning, find tags embedded in text of tiddler title/body var allTags = store.getTags(); if (config.options.chkAutoTagTrigger && config.options.txtAutoTagTrigger.length && newTags.contains(config.options.txtAutoTagTrigger)) for (var t=0; t<allTags.length; t++) { // note: don't automatically tag a tiddler with 'systemConfig' if (allTags[t][0]=='systemConfig') continue; if ((newBody.indexOf(allTags[t][0])!=-1) || (newTitle.indexOf(allTags[t][0])!=-1)) newTags.pushUnique(allTags[t][0]); } // encode tags with [[...]] (as needed) for (var t=0; t<newTags.length; t++) newTags[t]=String.encodeTiddlyLink(newTags[t]); // if there are no tags on this tiddler (either user-entered or auto-tagged) // and AutoTagDefault is enabled then use default tag (if any) if (!newTags.length && config.options.chkAutoTagDefault && config.options.txtAutoTagDefault.length) newTags.push(config.options.txtAutoTagDefault); // reassemble tags into a string (for other plugins that require a string) and pass it all on return this.coreSaveTiddler(title,newTitle,newBody,modifier,modified,newTags.join(" ")); } //}}} /*** |''Name:''|CalendarPlugin| |''Source:''|http://www.TiddlyTools.com/#CalendarPlugin| |''Author:''|SteveRumsby| |''License:''|unknown| |''~CoreVersion:''|2.0.10| // // updated by Jeremy Sheeley to add cacheing for 
reminders // // see http://www.geocities.com/allredfaq/reminderMacros.html // // ''Changes by ELS 2006.08.23:'' // // added handling for weeknumbers (code supplied by Martin Budden. see "wn**" comment marks) // // ''Changes by ELS 2005.10.30:'' // // config.macros.calendar.handler() // // ^^use "tbody" element for IE compatibility^^ // // ^^IE returns 2005 for current year, FF returns 105... fix year adjustment accordingly^^ // // createCalendarDays() // // ^^use showDate() function (if defined) to render autostyled date with linked popup^^ // // calendar stylesheet definition // // ^^use .calendar class-specific selectors, add text centering and margin settings^^ !!!!!Configuration: <<option chkDisplayWeekNumbers>> Display week numbers //(note: Monday will be used as the start of the week)// |''First day of week:''|<<option txtCalFirstDay>>|(Monday = 0, Sunday = 6)| |''First day of weekend:''|<<option txtCalStartOfWeekend>>|(Monday = 0, Sunday = 6)| !!!!!Syntax: |{{{<<calendar>>}}}|Produce a full-year calendar for the current year| |{{{<<calendar year>>}}}|Produce a full-year calendar for the given year| |{{{<<calendar year month>>}}}|Produce a one-month calendar for the given month and year| |{{{<<calendar thismonth>>}}}|Produce a one-month calendar for the current month| |{{{<<calendar lastmonth>>}}}|Produce a one-month calendar for last month| |{{{<<calendar nextmonth>>}}}|Produce a one-month calendar for next month| ***/ // //Modify this section to change the text displayed for the month and day names, to a different language for example. You can also change the format of the tiddler names linked to from each date, and the colours used. 
//{{{
// Display configuration for the calendar macro: month/day labels and the
// background colors used for weekends, month headers, and holidays.
config.macros.calendar = {};
config.macros.calendar.monthnames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
config.macros.calendar.daynames = ["M", "T", "W", "T", "F", "S", "S"];
config.macros.calendar.weekendbg = "#c0c0c0";
config.macros.calendar.monthbg = "#e0e0e0";
config.macros.calendar.holidaybg = "#ffc0c0";
//}}}
// //''Code section:'' // (you should not need to alter anything below here)//
//{{{
// Default option values (Monday = 0 ... Sunday = 6).
if(config.options.txtCalFirstDay == undefined)
	config.options.txtCalFirstDay = 0;
if(config.options.txtCalStartOfWeekend == undefined)
	config.options.txtCalStartOfWeekend = 5;
if(config.options.chkDisplayWeekNumbers == undefined)//wn**
	config.options.chkDisplayWeekNumbers = false;
// Week numbers force Monday as the first day of the week.
if(config.options.chkDisplayWeekNumbers)
	config.options.txtCalFirstDay = 0;

// Format used for the journal tiddler names linked from each date.
config.macros.calendar.tiddlerformat = "YYYY-MM-DD";  // This used to be changeable - for now, it isn't// <<smiley :-(>>

version.extensions.calendar = { major: 0, minor: 6, revision: 0, date: new Date(2006, 1, 22)};
config.macros.calendar.monthdays = [ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
config.macros.calendar.holidays = [ ]; // Not sure this is required anymore - use reminders instead
//}}}
// //Is the given date a holiday?
//{{{
// Returns true when 'date' matches an entry in config.macros.calendar.holidays.
// NOTE: the original code formatted the date into "long" and "short" holiday
// strings using the *same* "YYYY-MM-DD" pattern and compared both -- a
// redundant duplicate check, collapsed here into a single comparison.
function calendarIsHoliday(date)
{
	var formatted = date.formatString("YYYY-MM-DD");
	for(var i = 0; i < config.macros.calendar.holidays.length; i++) {
		if(config.macros.calendar.holidays[i] == formatted)
			return true;
	}
	return false;
}
//}}}
// //The main entry point - the macro handler.
// //Decide what sort of calendar we are creating (month or year, and which month or year)
// // Create the main calendar container and pass that to sub-ordinate functions to create the structure.
// ELS 2005.10.30: added creation and use of "tbody" for IE compatibility and fixup for year >1900//
// ELS 2005.10.30: fix year calculation for IE's getYear() function (which returns '2005' instead of '105')//
// ELS 2006.05.29: add journalDateFmt handling//
//{{{
// Macro entry point: builds the calendar table into 'place'.
// params[0]: "thismonth" | "lastmonth" | "nextmonth" | a year | (absent = current year)
// params[1]: optional month number (1-12) when params[0] is a year
config.macros.calendar.handler = function(place,macroName,params)
{
	var table = createTiddlyElement(place, "table", null, "calendar", null);
	var tbody = createTiddlyElement(table, "tbody", null, null, null);
	var today = new Date();
	var year = today.getYear();
	if (year<1900) year+=1900; // IE returns the full year; FF returns years since 1900

	// get format for journal link by reading the <<newJournal>> macro from
	// SideBarOptions (ELS 5/29/06 - based on suggestion by Martin Budden)
	var sidebarText = store.getTiddlerText("SideBarOptions");
	this.journalDateFmt = "YYYY-MM-DD";
	var re = new RegExp("<<(?:newJournal)([^>]*)>>","mg");
	var found = re.exec(sidebarText);
	if (found && found[1]!=null) {
		var macroArgs=found[1].readMacroParams();
		if (macroArgs[0]) this.journalDateFmt = macroArgs[0];
	}

	var month;
	if (params[0] == "thismonth") {
		cacheReminders(new Date(year, today.getMonth(), 1, 0, 0), 31);
		createCalendarOneMonth(tbody, year, today.getMonth());
	} else if (params[0] == "lastmonth") {
		month = today.getMonth()-1;
		if (month==-1) { month=11; year--; }
		cacheReminders(new Date(year, month, 1, 0, 0), 31);
		createCalendarOneMonth(tbody, year, month);
	} else if (params[0] == "nextmonth") {
		month = today.getMonth()+1;
		if (month>11) { month=0; year++; }
		cacheReminders(new Date(year, month, 1, 0, 0), 31);
		createCalendarOneMonth(tbody, year, month);
	} else {
		if (params[0]) year = params[0];
		if (params[1]) {
			// one named month of the given year
			cacheReminders(new Date(year, params[1]-1, 1, 0, 0), 31);
			createCalendarOneMonth(tbody, year, params[1]-1);
		} else {
			// full-year calendar
			cacheReminders(new Date(year, 0, 1, 0, 0), 366);
			createCalendarYear(tbody, year);
		}
	}
	// release the reminder cache now that rendering is complete
	window.reminderCacheForCalendar = null;
}
//}}}
//{{{
//This global variable is used to store reminders that have been cached
//while the calendar is being rendered. It will be renulled after the calendar is fully rendered.
window.reminderCacheForCalendar = null;
//}}}
//{{{
// Pre-load the reminder cache for 'leadtime' days starting at 'date', so that
// day cells can be marked without re-scanning every tiddler per cell.
// No-op when the reminder plugin (findTiddlersWithReminders) is absent.
function cacheReminders(date, leadtime)
{
	if (window.findTiddlersWithReminders == null)
		return;
	window.reminderCacheForCalendar = {};
	var leadtimeHash = [];
	leadtimeHash[0] = 0;
	leadtimeHash[1] = leadtime;
	var found = findTiddlersWithReminders(date, leadtimeHash, null, 1);
	for(var i = 0; i < found.length; i++) {
		//just tag it in the cache, so that when we're drawing days, we can bold this one.
		window.reminderCacheForCalendar[found[i]["matchedDate"]] = "reminder:" + found[i]["params"]["title"];
	}
}
//}}}
//{{{
// Render one month with prev/next navigation in its header.
function createCalendarOneMonth(calendar, year, mon)
{
	var row = createTiddlyElement(calendar, "tr", null, null, null);
	createCalendarMonthHeader(calendar, row, config.macros.calendar.monthnames[mon] + " " + year, true, year, mon);
	row = createTiddlyElement(calendar, "tr", null, null, null);
	createCalendarDayHeader(row, 1);
	createCalendarDayRowsSingle(calendar, year, mon);
}
//}}}
//{{{
// Render one month without navigation buttons.
function createCalendarMonth(calendar, year, mon)
{
	var row = createTiddlyElement(calendar, "tr", null, null, null);
	createCalendarMonthHeader(calendar, row, config.macros.calendar.monthnames[mon] + " " + year, false, year, mon);
	row = createTiddlyElement(calendar, "tr", null, null, null);
	createCalendarDayHeader(row, 1);
	createCalendarDayRowsSingle(calendar, year, mon);
}
//}}}
//{{{
// Render a full year: a header row with prev/next year buttons, then
// four rows of three months each.
function createCalendarYear(calendar, year)
{
	var row = createTiddlyElement(calendar, "tr", null, null, null);

	var back = createTiddlyElement(row, "td", null, null, null);
	var backHandler = function() {
		removeChildren(calendar);
		createCalendarYear(calendar, year-1);
	};
	createTiddlyButton(back, "<", "Previous year", backHandler);
	back.align = "center";

	var yearHeader = createTiddlyElement(row, "td", null, "calendarYear", year);
	yearHeader.align = "center";
	//yearHeader.setAttribute("colSpan", 19);
	yearHeader.setAttribute("colSpan",config.options.chkDisplayWeekNumbers?22:19);//wn**

	var fwd = createTiddlyElement(row, "td", null, null, null);
	var fwdHandler = function() {
		removeChildren(calendar);
		createCalendarYear(calendar, year+1);
	};
	createTiddlyButton(fwd, ">", "Next year", fwdHandler);
	fwd.align = "center";

	createCalendarMonthRow(calendar, year, 0);
	createCalendarMonthRow(calendar, year, 3);
	createCalendarMonthRow(calendar, year, 6);
	createCalendarMonthRow(calendar, year, 9);
}
//}}}
//{{{
// Render one row of three consecutive months (mon, mon+1, mon+2).
function createCalendarMonthRow(cal, year, mon)
{
	var row = createTiddlyElement(cal, "tr", null, null, null);
	createCalendarMonthHeader(cal, row, config.macros.calendar.monthnames[mon], false, year, mon);
	createCalendarMonthHeader(cal, row, config.macros.calendar.monthnames[mon+1], false, year, mon);
	createCalendarMonthHeader(cal, row, config.macros.calendar.monthnames[mon+2], false, year, mon);
	row = createTiddlyElement(cal, "tr", null, null, null);
	createCalendarDayHeader(row, 3);
	createCalendarDayRows(cal, year, mon);
}
//}}}
//{{{
// Render the month-name header cell; when 'nav' is true, flank it with
// previous/next month buttons.
function createCalendarMonthHeader(cal, row, name, nav, year, mon)
{
	var month;
	if(nav) {
		var back = createTiddlyElement(row, "td", null, null, null);
		back.align = "center";
		back.style.background = config.macros.calendar.monthbg;
		/* year-step navigation disabled:
		back.setAttribute("colSpan", 2);
		var backYearHandler = function() {
			var newyear = year-1;
			removeChildren(cal);
			cacheReminders(new Date(newyear, mon , 1, 0, 0), 31);
			createCalendarOneMonth(cal, newyear, mon);
		};
		createTiddlyButton(back, "<<", "Previous year", backYearHandler);
		*/
		var backMonHandler = function() {
			var newyear = year;
			var newmon = mon-1;
			if(newmon == -1) { newmon = 11; newyear = newyear-1;}
			removeChildren(cal);
			cacheReminders(new Date(newyear, newmon , 1, 0, 0), 31);
			createCalendarOneMonth(cal, newyear, newmon);
		};
		createTiddlyButton(back, "<", "Previous month", backMonHandler);
		month = createTiddlyElement(row, "td", null, "calendarMonthname", name);
		// month.setAttribute("colSpan", 3);
		// month.setAttribute("colSpan", 5);
		month.setAttribute("colSpan", config.options.chkDisplayWeekNumbers?6:5);//wn**
		var fwd = createTiddlyElement(row, "td", null, null, null);
		fwd.align = "center";
		fwd.style.background = config.macros.calendar.monthbg;
		// fwd.setAttribute("colSpan", 2);
		var fwdMonHandler = function() {
			var newyear = year;
			var newmon = mon+1;
			if(newmon == 12) { newmon = 0; newyear = newyear+1;}
			removeChildren(cal);
			cacheReminders(new Date(newyear, newmon , 1, 0, 0), 31);
			createCalendarOneMonth(cal, newyear, newmon);
		};
		createTiddlyButton(fwd, ">", "Next month", fwdMonHandler);
		/* year-step navigation disabled:
		var fwdYear = createTiddlyElement(row, "td", null, null, null);
		var fwdYearHandler = function() {
			var newyear = year+1;
			removeChildren(cal);
			cacheReminders(new Date(newyear, mon , 1, 0, 0), 31);
			createCalendarOneMonth(cal, newyear, mon);
		};
		createTiddlyButton(fwd, ">>", "Next year", fwdYearHandler);
		*/
	} else {
		month = createTiddlyElement(row, "td", null, "calendarMonthname", name);
		//month.setAttribute("colSpan", 7);
		month.setAttribute("colSpan",config.options.chkDisplayWeekNumbers?8:7);//wn**
	}
	month.align = "center";
	month.style.background = config.macros.calendar.monthbg;
}
//}}}
//{{{
// Render 'num' consecutive weeks' worth of day-name header cells,
// honoring the configured first day of week and shading weekend columns.
function createCalendarDayHeader(row, num)
{
	var cell;
	for(var i = 0; i < num; i++) {
		if (config.options.chkDisplayWeekNumbers)
			createTiddlyElement(row, "td");//wn**
		for(var j = 0; j < 7; j++) {
			var d = j + (config.options.txtCalFirstDay - 0);
			if(d > 6) d = d - 7;
			cell = createTiddlyElement(row, "td", null, null, config.macros.calendar.daynames[d]);
			if(d == (config.options.txtCalStartOfWeekend-0) || d == (config.options.txtCalStartOfWeekend-0+1))
				cell.style.background = config.macros.calendar.weekendbg;
		}
	}
}
//}}}
//{{{
// Render one week of day cells for the given month: 'col' leading empty
// cells, then days starting at 'first' (may be <1 for leading blanks);
// 'max' is the number of days in the month.
function createCalendarDays(row, col, first, max, year, mon)
{
	var i;
	if (config.options.chkDisplayWeekNumbers){
		if (first<=max) {
			var ww = new Date(year,mon,first);
			createTiddlyElement(row, "td", null, null, "w"+ww.getWeek());//wn**
		}
		else createTiddlyElement(row, "td", null, null, null);//wn**
	}
	for(i = 0; i < col; i++) {
		createTiddlyElement(row, "td", null, null, null);
	}
	var day = first;
	for(i = col; i < 7; i++) {
		var d = i + (config.options.txtCalFirstDay - 0);
		if(d > 6) d = d - 7;
		var daycell = createTiddlyElement(row, "td", null, null, null);
		var isaWeekend = ((d == (config.options.txtCalStartOfWeekend-0) || d == (config.options.txtCalStartOfWeekend-0+1))? true:false);
		if(day > 0 && day <= max) {
			var celldate = new Date(year, mon, day);
			// ELS 2005.10.30: use <<date>> macro's showDate() function to create popup
			if (window.showDate) {
				showDate(daycell,celldate,"popup","DD",config.macros.calendar.journalDateFmt,true, isaWeekend); // ELS 5/29/06 - use journalDateFmt
			} else {
				if(isaWeekend) daycell.style.background = config.macros.calendar.weekendbg;
				var title = celldate.formatString(config.macros.calendar.tiddlerformat);
				if(calendarIsHoliday(celldate)) {
					daycell.style.background = config.macros.calendar.holidaybg;
				}
				if(window.findTiddlersWithReminders == null) {
					var link = createTiddlyLink(daycell, title, false);
					link.appendChild(document.createTextNode(day));
				} else {
					var button = createTiddlyButton(daycell, day, title, onClickCalendarDate);
				}
			}
		}
		day++;
	}
}
//}}}
// //We've clicked on a day in a calendar - create a suitable pop-up of options.
// //The pop-up should contain: // // * a link to create a new entry for that date // // * a link to create a new reminder for that date // // * an <hr> // // * the list of reminders for that date //{{{ function onClickCalendarDate(e) { var button = this; var date = button.getAttribute("title"); var dat = new Date(date.substr(6,4), date.substr(3,2)-1, date.substr(0, 2)); date = dat.formatString(config.macros.calendar.tiddlerformat); var popup = createTiddlerPopup(this); popup.appendChild(document.createTextNode(date)); var newReminder = function() { var t = store.getTiddlers(date); displayTiddler(null, date, 2, null, null, false, false); if(t) { document.getElementById("editorBody" + date).value += "\n<<reminder day:" + dat.getDate() + " month:" + (dat.getMonth()+1) + " year:" + (dat.getYear()+1900) + " title: >>"; } else { document.getElementById("editorBody" + date).value = "<<reminder day:" + dat.getDate() + " month:" + (dat.getMonth()+1) + " year:" + (dat.getYear()+1900) + " title: >>"; } }; var link = createTiddlyButton(popup, "New reminder", null, newReminder); popup.appendChild(document.createElement("hr")); var t = findTiddlersWithReminders(dat, [0,14], null, 1); for(var i = 0; i < t.length; i++) { link = createTiddlyLink(popup, t[i].tiddler, false); link.appendChild(document.createTextNode(t[i].tiddler)); } } //}}} //{{{ function calendarMaxDays(year, mon) { var max = config.macros.calendar.monthdays[mon]; if(mon == 1 && (year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0)) { max++; } return max; } //}}} //{{{ function createCalendarDayRows(cal, year, mon) { var row = createTiddlyElement(cal, "tr", null, null, null); var first1 = (new Date(year, mon, 1)).getDay() -1 - (config.options.txtCalFirstDay-0); if(first1 < 0) first1 = first1 + 7; var day1 = -first1 + 1; var first2 = (new Date(year, mon+1, 1)).getDay() -1 - (config.options.txtCalFirstDay-0); if(first2 < 0) first2 = first2 + 7; var day2 = -first2 + 1; var first3 = (new Date(year, mon+2, 
1)).getDay() -1 - (config.options.txtCalFirstDay-0); if(first3 < 0) first3 = first3 + 7; var day3 = -first3 + 1; var max1 = calendarMaxDays(year, mon); var max2 = calendarMaxDays(year, mon+1); var max3 = calendarMaxDays(year, mon+2); while(day1 <= max1 || day2 <= max2 || day3 <= max3) { row = createTiddlyElement(cal, "tr", null, null, null); createCalendarDays(row, 0, day1, max1, year, mon); day1 += 7; createCalendarDays(row, 0, day2, max2, year, mon+1); day2 += 7; createCalendarDays(row, 0, day3, max3, year, mon+2); day3 += 7; } } //}}} //{{{ function createCalendarDayRowsSingle(cal, year, mon) { var row = createTiddlyElement(cal, "tr", null, null, null); var first1 = (new Date(year, mon, 1)).getDay() -1 - (config.options.txtCalFirstDay-0); if(first1 < 0) first1 = first1+ 7; var day1 = -first1 + 1; var max1 = calendarMaxDays(year, mon); while(day1 <= max1) { row = createTiddlyElement(cal, "tr", null, null, null); createCalendarDays(row, 0, day1, max1, year, mon); day1 += 7; } } //}}} // //ELS 2005.10.30: added styles //{{{ setStylesheet(".calendar, .calendar table, .calendar th, .calendar tr, .calendar td { text-align:center; } .calendar, .calendar a { margin:0px !important; padding:0px !important; }", "calendarStyles"); //}}} /*** |''Name:''|Based on CollapseTiddlersPlugin| |''Source:''|http://gensoft.revhost.net/Collapse.html| |''Author:''|Bradley Meck| |''License:''|unknown| |''~CoreVersion:''|2.0.10| |JOS 9/14/2006: changed text for 'collapse all' and 'expand all' to lower-case (consistency's sake); cleanned-up syntax (readability's sake) | |JOS 9/14/2006: removed "WebCollapsedTemplate" altogether; added compat code for topOfPageMode; added tool tips for collapseAll and expandAll | |ELS 2/24/2006: added fallback to "CollapsedTemplate if "WebCollapsedTemplate" is not found | |ELS 2/6/2006: added check for 'readOnly' flag to use alternative "WebCollapsedTemplate" | ***/ //{{{ config.commands.collapseTiddler = { text: "fold", tooltip: "Collapse this tiddler", 
handler: function(event,src,title){ var e = story.findContainingTiddler(src); var t = "CollapsedTemplate"; if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; } if (config.options.chkTopOfPageMode!=undefined) { var pm=config.options.chkTopOfPageMode; config.options.chkTopOfPageMode=false; } if(e.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]){ if(e.getAttribute("template") != t ){ e.setAttribute("oldTemplate",e.getAttribute("template")); story.displayTiddler(null,title,t); } } if (config.options.chkTopOfPageMode!=undefined) config.options.chkTopOfPageMode=pm; } } config.commands.expandTiddler = { text: "unfold", tooltip: "Expand this tiddler", handler: function(event,src,title){ if (config.options.chkTopOfPageMode!=undefined) { var pm=config.options.chkTopOfPageMode; config.options.chkTopOfPageMode=false; } var e = story.findContainingTiddler(src); story.displayTiddler(null,title,e.getAttribute("oldTemplate")); if (config.options.chkTopOfPageMode!=undefined) config.options.chkTopOfPageMode=pm; } } config.macros.collapseAll = { handler: function(place,macroName,params,wikifier,paramString,tiddler){ createTiddlyButton(place,"collapse all","Collapse all tiddlers",function(){ var t = "CollapsedTemplate"; if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; } if (config.options.chkTopOfPageMode!=undefined) { var pm=config.options.chkTopOfPageMode; config.options.chkTopOfPageMode=false; } story.forEachTiddler(function(title,tiddler){ if(tiddler.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]) story.displayTiddler(null,title,t); }) if (config.options.chkTopOfPageMode!=undefined) config.options.chkTopOfPageMode=pm; }) } } config.macros.expandAll = { handler: function(place,macroName,params,wikifier,paramString,tiddler){ createTiddlyButton(place,"expand all","",function(){ var t = "CollapsedTemplate"; if (!store.tiddlerExists(t)) { alert("Can't find 
'CollapsedTemplate'"); return; } if (config.options.chkTopOfPageMode!=undefined) { var pm=config.options.chkTopOfPageMode; config.options.chkTopOfPageMode=false; } story.forEachTiddler(function(title,tiddler){ if(tiddler.getAttribute("template") == t) story.displayTiddler(null,title,tiddler.getAttribute("oldTemplate")); }) if (config.options.chkTopOfPageMode!=undefined) config.options.chkTopOfPageMode=pm; }) } } config.commands.collapseOthers = { text: "focus", tooltip: "Expand this tiddler and collapse all others", handler: function(event,src,title){ var e = story.findContainingTiddler(src); var t = "CollapsedTemplate"; if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; } if (config.options.chkTopOfPageMode!=undefined) { var pm=config.options.chkTopOfPageMode; config.options.chkTopOfPageMode=false; } story.forEachTiddler(function(title,tiddler){ if(tiddler.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]){ if (tiddler!=e) story.displayTiddler(null,title,t); } }) if (config.options.chkTopOfPageMode!=undefined) config.options.chkTopOfPageMode=pm; } } //}}} <!--{{{--> <div class='toolbar' macro='toolbar closeTiddler closeOthers +expandTiddler collapseOthers editTiddler permalink'></div> <div class='title' macro='view title'></div> <!--}}}--> /*** |''Source:''|http://ziddlywiki.com/forum#CommentPlugin| |''Credit:''|Tim Morgan| ''Adds "comments" to any TiddlyWiki or adaptation.'' To use, copy this tiddler's contents to a new tiddler on your site and tag it {{{systemConfig}}}. Used in conjunction with the RecentPlugin, one can have a decent forum environment. ***/ //{{{ config.CommentPlugin = { only_on_tags: [], not_on_tags: ['about'], // "true" or "false"... 
// Remaining CommentPlugin options (object opened on the previous line):
// fold_comments -- show a fold/unfold link above the comment list;
// default_fold -- start with the comment list folded.
fold_comments: true,
	default_fold: false };
// True when `item` occurs in `arr` (loose == comparison).  Returns
// undefined rather than false when absent; callers only test truthiness.
function in_array(item, arr){for(var i=0;i<arr.length;i++)if(item==arr[i])return true};
// True when at least one element of `items` occurs in `arr`.
function one_in_array(items, arr){for(var i=0;i<items.length;i++)if(in_array(items[i], arr))return true;return false};
// Follow the comment chain upward: a comment tiddler's first tag names
// its parent (see the tags assigned when a comment is created).  Stops
// at the first tiddler not tagged 'comments'.
function get_parent(tiddler){while(in_array('comments', tiddler.tags)) tiddler=store.fetchTiddler(tiddler.tags[0]);return tiddler};
// Recursive count of all comment tiddlers attached (directly or via
// replies) to `tiddler`.
function count_comments(tiddler){var tagged=store.getTaggedTiddlers(tiddler.title);var count=0;for(var i=0;i<tagged.length;i++)if(in_array('comments', tagged[i].tags)){count++;count+=count_comments(tagged[i])}return count};
// Render the comments section below every tiddler body, and style it.
config.shadowTiddlers.ViewTemplate += "\n<div class='comments' macro='comments'></div>";
config.shadowTiddlers.StyleSheetLayout += '\n.commentTags ul{list-style:none; padding-left:0px;margin: 0 0 3px 0;} .commentTags li{display:inline;color:#999;} .commentTags li a.button{color:#999;} .comment{border-left:1px solid #ccc; margin-top:10px; margin-left:10px; padding:5px;} .newCommentLink{padding-top:10px} .tagging, .selected .tagging, .tiddler .tagging{display:none;} .comment a.button{padding:0px; font-size:smaller;}';
// Macro: renders a "New Comment Here..." button under a tiddler, unless
// commenting is disallowed (read-only wiki, tag filters, or -- when the
// zw object exists -- a ziddlywiki login requirement).  Clicking it
// opens a fresh, timestamped comment tiddler in edit mode.
config.macros.newCommentLink = {
label: 'New Comment Here...',
prompt: 'Create a new comment tiddler associated with this tiddler',
handler: function(place,macroName,params,wikifier,paramString,tiddler) {
if(tiddler && store.tiddlerExists(tiddler.title) && !readOnly && (!window.zw || zw.loggedIn || zw.anonEdit)) {
// Honor the only_on_tags / not_on_tags filters from config.CommentPlugin.
if(config.CommentPlugin.only_on_tags.length>0 && !one_in_array(tiddler.tags, config.CommentPlugin.only_on_tags)) return;
if(config.CommentPlugin.not_on_tags.length>0 && one_in_array(tiddler.tags, config.CommentPlugin.not_on_tags)) return;
var onclick = function(e) {
if (!e) var e = window.event;
var theTarget = resolveTarget(e);
// Comments on comments reuse the root tiddler's base title, so every
// comment in a thread is named "<base> Comment <timestamp>".
if(tiddler.title.indexOf(' Comment ')>-1) var title = tiddler.title.split(' Comment ')[0];
else var title = tiddler.title;
var title = title + ' Comment ' + (new Date()).formatString('YYYY-0MM-0DD 0hh:0mm:0ss');
var comment
= store.createTiddler(title); comment.text = ''; comment.tags = [tiddler.title, 'comments', 'excludeLists']; story.displayTiddler(theTarget, title, DEFAULT_EDIT_TEMPLATE); story.focusTiddler(title,"text"); return false; } createTiddlyButton(place, this.label, this.prompt, onclick); } } }; config.macros.comments = { dateFormat: 'DD MMM YYYY hh:0mm', handler: function(place,macroName,params,wikifier,paramString,tiddler) { if(tiddler.title=='comments') return; var comments = store.getTaggedTiddlers(tiddler.title, 'created'); if(comments.length>0 && !in_array('comments', tiddler.tags) && config.CommentPlugin.fold_comments) { var show = createTiddlyElement(place, 'p'); show.innerHTML = '<a href="#" onclick="var e=document.getElementById(\'comments'+tiddler.title+'\');e.style.display=e.style.display==\'block\'?\'none\':\'block\';return false;">Comments ('+count_comments(tiddler)+') &raquo;</a>'; } var place = createTiddlyElement(place, 'div', 'comments'+tiddler.title, 'comments'); if(comments.length>0 && !in_array('comments', tiddler.tags) && config.CommentPlugin.fold_comments && config.CommentPlugin.default_fold) place.style.display = 'none'; else place.style.display = 'block'; for(var i=0; i<comments.length; i++) { if(!in_array('comments', comments[i].tags))continue; var container = createTiddlyElement(place, 'div', null, 'comment'); var title = createTiddlyElement(container, 'strong'); var link = createTiddlyLink(title, comments[i].modifier, true); createTiddlyElement(title, 'span', null, null, ', '+comments[i].created.formatString(this.dateFormat)); if(comments[i].modifier == config.options.txtUserName) { createTiddlyElement(title, 'span', null, null, ' ('); var edit = createTiddlyLink(title, comments[i].title); edit.innerHTML = 'edit'; createTiddlyElement(title, 'span', null, null, ')'); } createTiddlyElement(container, 'br'); config.macros.tiddler.handler(container, null, [comments[i].title]); createTiddlyElement(container, 'br'); 
config.macros.comments.handler(container,null,null,null,null,comments[i]); } config.macros.newCommentLink.handler(place,null,null,null,null,tiddler); } }; var CPCloseTiddlers = []; TiddlyWiki.prototype.CommentPlugin_saveTiddler = TiddlyWiki.prototype.saveTiddler; TiddlyWiki.prototype.saveTiddler = function(title,newTitle,newBody,modifier,modified,tags) { var t = this.CommentPlugin_saveTiddler(title,newTitle,newBody,modifier,modified,tags); var tags = tags.split(/\s+/g); if(in_array('comments', tags)) { var original = config.CommentPlugin.default_fold; config.CommentPlugin.default_fold = false; story.refreshTiddler(get_parent(t).title, DEFAULT_VIEW_TEMPLATE, true); config.CommentPlugin.default_fold = original; CPCloseTiddlers.push(newTitle); setTimeout("story.closeTiddler(CPCloseTiddlers.pop(), true)", 1000); } return t; }; //}}} * ''Next action'': Announce DeepArrow 0.0 * To do ++++ * Announce DeepArrow 0.0 === * Done +++ * Heard back from Ross. +++> There's no remove facility at present, though the plan was to prevent repeat uploads even if the original was removed. That way a version would denote a unique instance. But because we're all feeling our way at present, I've turned off the repeated upload check for now. === I re-uploaded 0.0. * If Ross P removes 0.0 from Hackage, re-upload it. Otherwise, bump version in .cabal, follow [[project release check-list]], and delete the 0.0 dist on darcs.haskell.org (which will differ from Hackage's). * Asked Ross to remove my 0.0 package from Hackage. * [[project release check-list]] * Examples on the wiki page. Took from the Eros paper. * Better: Haddock'd examples in {{{src/Examples}}}. * Learned about hit counting on the Haskell wiki. No need for a statcounter. Mediawiki does hit counting per page. The Quiet skin had hidden the counts, but the other skins show it. I asked Ashley Y to add the counter back to Quiet, and meanwhile I've switched. * Substitute mtl for monadLib for DeepArrow, Phooey, and TV. 
It comes along with Hugs and GHC and so simplifies installation. * Documentation. For starters, I might point to the Eros paper. * Wiki page * Link docs to [[TV]] project (wiki) page. * Rename {{{ArrowX}}} to {{{DeepArrow}}} in the Eros paper and put new version on the web. * Sync with monadLib-3.1.0 * External library links * Docs with source & comment pointers * Use [[cabal-make]] * Use in TV * Make a Cabalized library === TiddlyWiki /*** |''Name:''|DisableWikiLinksPlugin| |''Source:''|http://www.TiddlyTools.com/#DisableWikiLinksPlugin| |''Author:''|Eric Shulman - ELS Design Studios| |''License:''|[[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]| |''~CoreVersion:''|2.0.10| This plugin allows you to disable TiddlyWiki's automatic WikiWord linking behavior, so that WikiWords embedded in tiddler content will be rendered as regular text, instead of being automatically converted to tiddler links. To create a tiddler link when automatic linking is disabled, you must enclose the link text within {{{[[...]]}}}. You can also block automatic WikiWord linking behavior only for non-existing tiddler titles, while still automatically linking WikiWords that correspond to existing tiddlers titles or shadow tiddler titles. You can also block specific selected WikiWords from being automatically linked by listing them in [[DisableWikiLinksList]], separated by whitespace. This tiddler is optional and, when present, causes the listed words to always be excluded, even if automatic linking of other WikiWords is being permitted. 
Note: WikiWords contained in default ''shadow'' tiddlers will be automatically linked unless you select an additional checkbox option lets you disable these automatic links as well, though this is not recommended, since it can make it more difficult to access some TiddlyWiki standard default content (such as AdvancedOptions or SideBarTabs) !!!!!Configuration <<< Self-contained control panel: <<option chkDisableNonExistingWikiLinks>> Disable automatic WikiWord links for non-existing tiddlers <<option chkDisableWikiLinks>> Disable ALL automatic WikiWord tiddler links <<option chkAllowLinksFromShadowTiddlers>> ... except for WikiWords contained in shadow tiddlers <<< !!!!!Installation <<< import (or copy/paste) the following tiddlers into your document: ''DisableWikiLinksPlugin'' (tagged with <<tag systemConfig>>) <<< !!!!!Revision History <<< ''2006.12.31 [1.4.0]'' in formatter, test for chkDisableNonExistingWikiLinks ''2006.12.09 [1.3.0]'' in formatter, test for excluded wiki words specified in DisableWikiLinksList ''2006.12.09 [1.2.2]'' fix logic in autoLinkWikiWords() (was allowing links TO shadow tiddlers, even when chkDisableWikiLinks is TRUE). 
''2006.12.09 [1.2.1]'' revised logic for handling links in shadow content ''2006.12.08 [1.2.0]'' added hijack of Tiddler.prototype.autoLinkWikiWords so regular (non-bracketed) WikiWords won't be added to the missing list ''2006.05.24 [1.1.0]'' added option to NOT bypass automatic wikiword links when displaying default shadow content (default is to auto-link shadow content) ''2006.02.05 [1.0.1]'' wrapped wikifier hijack in init function to eliminate globals and avoid FireFox 1.5.0.1 crash bug when referencing globals ''2005.12.09 [1.0.0]'' initial release <<< !!!!!Credits <<< This feature was developed by EricShulman from [[ELS Design Studios|http:/www.elsdesign.com]] <<< !!!!!Code ***/ //{{{ version.extensions.disableWikiLinks= {major: 1, minor: 4, revision: 0, date: new Date(2006,12,31)}; if (config.options.chkDisableNonExistingWikiLinks==undefined) config.options.chkDisableNonExistingWikiLinks= true; if (config.options.chkDisableWikiLinks==undefined) config.options.chkDisableWikiLinks= false; if (config.options.chkAllowLinksFromShadowTiddlers==undefined) config.options.chkAllowLinksFromShadowTiddlers=true; // find the formatter for wikiLink and replace handler with 'pass-thru' rendering initDisableWikiLinksFormatter(); function initDisableWikiLinksFormatter() { for (var i=0; i<config.formatters.length && config.formatters[i].name!="wikiLink"; i++); config.formatters[i].coreHandler=config.formatters[i].handler; config.formatters[i].handler=function(w) { // supress any leading "~" (if present) var skip=(w.matchText.substr(0,1)==config.textPrimitives.unWikiLink)?1:0; var title=w.matchText.substr(skip); var exists=store.tiddlerExists(title); var inShadow=w.tiddler && store.isShadowTiddler(w.tiddler.title); // check for specific excluded wiki words var t=store.getTiddlerText("DisableWikiLinksList") if (t && t.length && t.indexOf(w.matchText)!=-1) { w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; } // if not disabling links from shadows (default setting) 
if (config.options.chkAllowLinksFromShadowTiddlers && inShadow) return this.coreHandler(w); // check for non-existing non-shadow tiddler if (config.options.chkDisableNonExistingWikiLinks && !exists) { w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; } // if not enabled, just do standard WikiWord link formatting if (!config.options.chkDisableWikiLinks) return this.coreHandler(w); // just return text without linking w.outputText(w.output,w.matchStart+skip,w.nextMatch) } } Tiddler.prototype.coreAutoLinkWikiWords = Tiddler.prototype.autoLinkWikiWords; Tiddler.prototype.autoLinkWikiWords = function() { // DEBUG alert("processing: "+this.title); // if all automatic links are not disabled, just return results from core function if (!config.options.chkDisableWikiLinks) return this.coreAutoLinkWikiWords.apply(this,arguments); return false; } //}}} <!--{{{--> <div class='toolbar' macro='toolbar +saveTiddler -cancelTiddler copyTiddler deleteTiddler autosizeEditor increaseEditor decreaseEditor resizeEditor'></div> <div class='title' macro='view title'></div> <div class='editor' macro='edit title'></div> <div class='editor' macro='edit text'></div> <div class='editor' macro='edit tags'></div> <div class='editorFooter'> <span macro='message views.editor.tagPrompt'></span> <span macro='tagChooser'></span> <!-- <div macro='resize'></div> --> </div> <!--}}}--> * For twee.el, keybound functions: ** Start a new line with the same *'s or #'s as the current logical line. I don't know how to get to the start of the current logical line in a longlines mode. ** Add or remove a bullet or numbering nesting level for a region. Where to go with Eros: * Get darcs-server working. //Status//: Waiting to hear from Daan ([[2006-11-16]]). * Learn about cabal, and use it. * Rework implementation in terms of values, with no syntactic representation and no code generation. 
Structure with enough flexibility so that I can slip in alternative representations, to support persistence and optimization. See if composition speeds up significantly. * Improve highlighting. Doesn't always work with sliders. Never with image output. * How to retain input state? * Try out some simple 3D, via ~OpenGL. Might work fine without compilation. More at {{{c:/conal/Eros/Haskell/todo.txt}}} |from|"Paul R. Potts" <paul@thepottshouse.org>| |to|conal@conal.net| |date|Mar 31, 2007 3:54 PM| |subject|comments on "Tangible Functional Programming"| Hi Conal, I'm sorry these comments are so late. I hope they are still useful to you. General comments first: * The presentation of the GUI and the way it operates I find fairly confusing and unclear. In particular, the dynamic behavior of the GUI is very hard to convey here. I think the problem is really that you know too much about how it works to be writing this section -- it is too easy for you to review what you've written along with the screen shots, fill in the blanks, and believe that it really makes sense. It might produce better results to sit down with someone unfamiliar with the program and allow them to discover the operation, asking you questions, and then start with notes generated from that session. * I think the presentation on input extractors should logically come before function extractors, as it is easier to understand. Specifics in order of appearance: Abstract: "artistic/visual creative style." Maybe change to "visual learners"; there is at least some research on the way people learn. More generally, I'm a little bit baffled by your references to this as a creative tool rather than a learning tool. What is the "created" result -- an interactive program? A drawing? In general I think this tool and approach has a lot of pedagogical value but not necessarily much value as a tool for creation. 
Although it does remind me in some ways of the defunct mTropolis tool, a sort of Macromedia Director with a much more rational authoring paradigm. In para. 2 of the abstract, you mention "tuples, functions, etc." Wording "lists, tuples, or other functions" seems preferable to me. Introduction: para. 3: in several places you mention "value inspectors" without indicating that you can use them to modify the values. They seem more like property editors than inspectors, where inspector has a read-only connotation. Sec. 2.1: para. 3: "allows the user to view slices of the function dynamically." I'm not sure what you mean by "slices." Some relation to "sections?" Para. 4: you introduce magSq before explaining what it does. You should introduce the implementation of magSq the first time you use it. I found myself staring at 3, 4, and 25.0 with only the type signature wondering how you got 25. The whole section 2.2 became very confusing for me. From the GUI screen shots, I still can't make any sense out of why the empty space between the inputs containing 3 and 4 is an input -- I guess that represents the pair -- why the space between 6 and the pair is the output apparently of the whole combination, and why the space below the pair seems to be the partial/curried (Double, Double) -> Bool. I'm sure using the GUI tool for just a few seconds would clear this up, but I think this representation is just not clear in general. And you never show what is in your application's menus, although you mention it in the text. A real step-by-step would probably clarify this part enormously. Intermixing the GUI and the program representation at this stage probably hampers understanding. There are too many overlapping concepts here: the dynamic behavior of the GUI, the program representations of the GUI objects, and the (actually very simple) program behavior that the representation is expressing. 
Consider coming at this in three completely separate consecutive passes, the GUI step by step, the program step by step, and then the "wrapped" code including the GUI objects as code. Sec. 2.2 4th para, code following: I don't know what the asterisks are for here. This may be some Haskell I'm not familiar with yet. 5th para., : "the visualization of in" should read "the visualization in" 6th para., makes much more sense here. Sec. 2.3 para 3: "The Haskell expressions appearing at the top of TVs rely that alternative." Do you mean "...appearing at the tops of the TVs _reify_ that alternative"? or "rely on" ? Unclear. Sec. 3: Again, consider rearranging this presentation to show progression of visual examples step by step. It would help a great deal to make sure that figures 12 and 13 appear on the same page; the page break here is particularly bad. Also, it isn't possible from the figures to understand the "vanishing" inputs and outputs unless you show before/after, step by step. I think this is really not very good GUI design; "wires" to indicate connection would be better, and a clear method is needed to undo the connection. Figure 14: I'm not sure why your conjunction operator has two outs; one of them a section (&& y)? It seems like the placement and meaning of the input/output bars is generally confusing. Last para. sec. 4: are tweaked links represented visually in some way? How do you select something to tweak/untweak, and how do you know you've tweaked it? Sec. 4 para. 3: the use of "upper" and "lower" referring to figure 11 -- which is not on this page, a layout problem -- is very confusing. You use "TV" to refer to a whole window in the GUI, but here you are referring to a part of a window's contents. There are 3 windows; what are you talking about when you refer to "lower left" and "lower right?" I have no clue. Also, how do you hold "xPos" (I think you mean you select the box). This needs some consistent language here. Sec. 
4.1 footnote 5: I'm not really clear on what you mean here. Are you talking about operations on an AST (what would be s-expressions in a Lispy language) v. compiled code, because an expression like 3+4 would be collapsed to a single value? Can you even do that in Haskell? If you can't, you seem to be undermining your own project in favor of a language with macro transformers as opposed to graph-rewriting? What is the advantage? Strong typing? That's why I'm not clear what you mean! In general I should say that the "mathematical" part (sec. 4) is clearer, although due to my general knowledge of Haskell and FP I can't follow your argument very well much beyond sec. 4.4. A few misc. comments: Why at the top of p. 7 are you suddenly using named parameters ("h") as opposed to point-free style? Last code section in 4.3, why test2 instead of just test? By 5.1 I'm pretty well lost, but that's not your fault. In footnote 7 I think you might want to refer to "legal Haskell 98 [reference] syntax," instead of "standard Haskell." The layout of the text across pages in sec. 5.3 is unfortunate and hard to follow, particularly where you pick up on p. 9. I'm confused by the elipses (...). Maybe the text could continue after figure 18? Confusing that some of your code is inline, and some in "figures." Sec. 6 para. 4, text "With every input node, associate a function extractor..." change these usages to make them parallel, like "there is an associated function extractor" or "we associate" p. 11 bottom left, another use of asterisks; maybe some Haskell syntax I'm unfamiliar with... p. 11 to right, intentional to show incomplete code here with ellipses? Sec. 8 para. 1: "Our use of a term reresentation" -- change to "Our display of" para. 2: "the size and complexity of a TV is dependent only it type," change to "only on its type" (I think that's what you mean, at least). Later para. 
2: "value and an accompanying visualization" should read either "a value and an accompanying representation" or just "value and accompanying rep..." (make the usage parallel, either both use a/an or both don't). I think your point about the composition step producing a more complex representation is interesting here, having used a lot of Visual programming environments like HyperCard, Visual BASIC, the origina Cocoa, Macromedia Director, Toolbook, etc. The last para. in section 8, about referring to function distinct from value, has gotten lost a bit and is unclear in your visual examples. It doesn't help the way you refer to "value-based" and "semantic" as your approach, as distinct from "syntactic." Sec. 8.2, para. 2, "Eros remedy" should read "Eros remedies" and "definability" should probably read "definition." In the text that follows when you are talking about systems as presented in books or papers, your tense is inconsistent; I'd make everything present tense ("relaxes", "explores"). End of para 3, "shows" instead of "does show," you need a comma after "intermediate values," and the last sentence here says "uses a lacks" which I think should just read "lacks." Last sentence before references section ends with two periods. That's all... hope this is helpful! Paul -- Paul R. Potts - paul@thepottshouse.org - http://thepottshouse.org |from|"Paul R. Potts" <paul@thepottshouse.org>| |to|conal@conal.net| |date|Mar 31, 2007 3:54 PM| |subject|comments on "Tangible Functional Programming"| Hi Conal, I'm sorry these comments are so late. I hope they are still useful to you. General comments first: - The presentation of the GUI and the way it operates I find fairly confusing and unclear. In particular, the dynamic behavior of the GUI is very hard to convey here. 
I think the problem is really that you know too much about how it works to be writing this section -- it is too easy for you to review what you've written along with the screen shots, fill in the blanks, and believe that it really makes sense. It might produce better results to sit down with someone unfamiliar with the program and allow them to discover the operation, asking you questions, and then start with notes generated from that session. - I think the presentation on input extractors should logically come before function extractors, as it is easier to understand. Specifics in order of appearance: Abstract: "artistic/visual creative style." Maybe change to "visual learners"; there is at least some research on the way people learn. More generally, I'm a little bit baffled by your references to this as a creative tool rather than a learning tool. What is the "created" result -- an interactive program? A drawing? In general I think this tool and approach has a lot of pedagogical value but not necessarily much value as a tool for creation. Although it does remind me in some ways of the defunct mTropolis tool, a sort of Macromedia Director with a much more rational authoring paradigm. In para. 2 of the abstract, you mention "tuples, functions, etc." Wording "lists, tuples, or other functions" seems preferable to me. Introduction: para. 3: in several places you mention "value inspectors" without indicating that you can use them to modify the values. They seem more like property editors than inspectors, where inspector has a read-only connotation. Sec. 2.1: para. 3: "allows the user to view slices of the function dynamically." I'm not sure what you mean by "slices." Some relation to "sections?" Para. 4: you introduce magSq before explaining what it does. You should introduce the implementation of magSq the first time you use it. I found myself staring at 3, 4, and 25.0 with only the type signature wondering how you got 25. 
The whole section 2.2 became very confusing for me. From the GUI screen shots, I still can't make any sense out of why the empty space between the inputs containing 3 and 4 is an input -- I guess that represents the pair -- why the space between 6 and the pair is the output apparently of the whole combination, and why the space below the pair seems to be the partial/curried (Double, Double) -> Bool. I'm sure using the GUI tool for just a few seconds would clear this up, but I think this representation is just not clear in general. And you never show what is in your application's menus, although you mention it in the text. A real step-by-step would probably clarify this part enormously. Intermixing the GUI and the program representation at this stage probably hampers understanding. There are too many overlapping concepts here: the dynamic behavior of the GUI, the program representations of the GUI objects, and the (actually very simple) program behavior that the representation is expressing. Consider coming at this in three completely separate consecutive passes, the GUI step by step, the program step by step, and then the "wrapped" code including the GUI objects as code. Sec. 2.2 4th para, code following: I don't know what the asterisks are for here. This may be some Haskell I'm not familiar with yet. 5th para., : "the visualization of in" should read "the visualization in" 6th para., makes much more sense here. Sec. 2.3 para 3: "The Haskell expressions appearing at the top of TVs rely that alternative." Do you mean "...appearing at the tops of the TVs _reify_ that alternative"? or "rely on" ? Unclear. Sec. 3: Again, consider rearranging this presentation to show progression of visual examples step by step. It would help a great deal to make sure that figures 12 and 13 appear on the same page; the page break here is particularly bad. Also, it isn't possible from the figures to understand the "vanishing" inputs and outputs unless you show before/after, step by step. 
I think this is really not very good GUI design; "wires" to indicate connection would be better, and a clear method is needed to undo the connection. Figure 14: I'm not sure why your conjunction operator has two outs; one of them a section (&& y)? It seems like the placement and meaning of the input/output bars is generally confusing. Last para. sec. 4: are tweaked links represented visually in some way? How do you select something to tweak/untweak, and how do you know you've tweaked it? Sec. 4 para. 3: the use of "upper" and "lower" referring to figure 11 -- which is not on this page, a layout problem -- is very confusing. You use "TV" to refer to a whole window in the GUI, but here you are referring to a part of a window's contents. There are 3 windows; what are you talking about when you refer to "lower left" and "lower right?" I have no clue. Also, how do you hold "xPos" (I think you mean you select the box). This needs some consistent language here. Sec. 4.1 footnote 5: I'm not really clear on what you mean here. Are you talking about operations on an AST (what would be s-expressions in a Lispy language) v. compiled code, because an expression like 3+4 would be collapsed to a single value? Can you even do that in Haskell? If you can't, you seem to be undermining your own project in favor of a language with macro transformers as opposed to graph-rewriting? What is the advantage? Strong typing? That's why I'm not clear what you mean! In general I should say that the "mathematical" part (sec. 4) is clearer, although due to my general knowledge of Haskell and FP I can't follow your argument very well much beyond sec. 4.4. A few misc. comments: Why at the top of p. 7 are you suddenly using named parameters ("h") as opposed to point-free style? Last code section in 4.3, why test2 instead of just test? By 5.1 I'm pretty well lost, but that's not your fault. 
In footnote 7 I think you might want to refer to "legal Haskell 98 [reference] syntax," instead of "standard Haskell." The layout of the text across pages in sec. 5.3 is unfortunate and hard to follow, particularly where you pick up on p. 9. I'm confused by the elipses (...). Maybe the text could continue after figure 18? Confusing that some of your code is inline, and some in "figures." Sec. 6 para. 4, text "With every input node, associate a function extractor..." change these usages to make them parallel, like "there is an associated function extractor" or "we associate" p. 11 bottom left, another use of asterisks; maybe some Haskell syntax I'm unfamiliar with... p. 11 to right, intentional to show incomplete code here with ellipses? Sec. 8 para. 1: "Our use of a term reresentation" -- change to "Our display of" para. 2: "the size and complexity of a TV is dependent only it type," change to "only on its type" (I think that's what you mean, at least). Later para. 2: "value and an accompanying visualization" should read either "a value and an accompanying representation" or just "value and accompanying rep..." (make the usage parallel, either both use a/an or both don't). I think your point about the composition step producing a more complex representation is interesting here, having used a lot of Visual programming environments like HyperCard, Visual BASIC, the origina Cocoa, Macromedia Director, Toolbook, etc. The last para. in section 8, about referring to function distinct from value, has gotten lost a bit and is unclear in your visual examples. It doesn't help the way you refer to "value-based" and "semantic" as your approach, as distinct from "syntactic." Sec. 8.2, para. 2, "Eros remedy" should read "Eros remedies" and "definability" should probably read "definition." In the text that follows when you are talking about systems as presented in books or papers, your tense is inconsistent; I'd make everything present tense ("relaxes", "explores"). 
End of para 3, "shows" instead of "does show," you need a comma after "intermediate values," and the last sentence here says "uses a lacks" which I think should just read "lacks." Last sentence before references section ends with two periods. That's all... hope this is helpful! Paul -- Paul R. Potts - paul@thepottshouse.org - http://thepottshouse.org /% |Name|ExpandSlidersScript| |Source|http://www.TiddlyTools.com/#ExpandSlidersScript| |Version|0.0.0| |Author|Eric Shulman - ELS Design Studios| |License|http://www.TiddlyTools.com/#LegalStatements <<br>>and [[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]| |~CoreVersion|2.1| |Type|script| |Requires|| |Overrides|| |Description|| %//% usage: <<tiddler ExpandSlidersScript with: elementID expandlabel collapselabel>> %/<script label="expand"> // if 'in a tiddler', expand all sliders... otherwise, expand based on passed in element ID var here=story.findContainingTiddler(place); if (!here) { if ("1"=="$"+"1") { alert("ExpandSlidersScript: not in a tiddler, please use 'with: elementID' syntax"); return; } var here=document.getElementById("$1");
if (!here) { alert("ExpandSlidersScript: unknown elementID: '$1'"); return; } } var expandlabel="expand"; if ("$2"!="$"+"2") var expandlabel="$2";
var collapselabel="collapse"; if ("$3"!="$"+"3") var collapselabel="$3"; var elems=here.getElementsByTagName("*"); var state=(place.innerHTML.toLowerCase().indexOf("expand")!=-1)?"none":"block"; for (var e=0; e<elems.length; e++) { var p=elems[e].sliderPanel; if (p && p.className=="sliderPanel") { if (p.style.display==state) window.onClickNestedSlider({target:elems[e]}); } } place.innerHTML=state=="none"?collapselabel:expandlabel; return false; </script><script> place.lastChild.className="button"; var expandlabel="expand"; if ("$2"!="$"+"2") var expandlabel="$2";
var collapselabel="collapse"; if ("$3"!="$"+"3") var collapselabel="\$3";
// Initialize the button label to reflect the action it will perform:
// if the label doesn't already say "expand", show the collapse label,
// otherwise show the expand label.  (NOTE(review): this runs in the second
// <script> block above, after the onclick slider-toggling script.)
if (place.lastChild.innerHTML.toLowerCase().indexOf("expand")==-1)
place.lastChild.innerHTML=collapselabel;
else
place.lastChild.innerHTML=expandlabel;
</script>
/***
|FileDropPlugin|h
|version : 0.1.1|
|date : Nov 13 2006|
|usage : drag a file onto the TW to have it be made into a tiddler|
|browser(s) supported : Mozilla|

!Trouble Shooting
*If the plugin does not seem to work, open up the page "about:config" (just type it in the address bar) and make sure @@color(blue):signed.applets.codebase_principal_support@@ is set to @@color(blue):true@@
*Also, the plugin apparently depends on TW 2.1.

!Revisions
*Multiple File Dropping API updated, to end all capturing events after yours return a value that makes if(myFunctionsReturnValue) evaluate to true
*Added support for multiple file drop handlers
***Standard Flavor is "application/x-moz-file"
*Old plugin would disallow drops of text between applications because it didn't check if the transfer was a file.

!Example Handler
*Adds simple file import control, add this to a tiddler tagged {{{systemConfig}}} to make file dropping work
{{{
{
if(
confirm("You have dropped the file \""+nsiFile.path+"\" onto the page, it will be imported as a tiddler. Is that ok?")
)
{
var newDate = new Date();
var title = prompt("what would you like to name the tiddler?");
}
return true;
})
}}}

!Example Handler without popups and opening the tiddler on load
*Adds simple file import control, add this to a tiddler tagged {{{systemConfig}}} to make file dropping work
{{{
{
var newDate = new Date();
story.displayTiddler(null,nsiFile.path)
return true;
})
}}}

***/

//{{{
// Plugin version record.  FIX(review): the original spelled this property
// "varsion"; nothing in the visible code reads it, and the other plugins in
// this file use the "version" spelling (e.g. version.extensions.*), so the
// typo is corrected here.  Confirm no external tiddler reads the old name.
config.macros.fileDrop = {version : {major : 0, minor : 0, revision: 1}};
// Registered drop handlers ({flavor, handler} records); consulted in order on each drop.
config.macros.fileDrop.customDropHandlers = [];

// Drag-and-drop event handler (Mozilla-only, uses XPConnect/XPCOM).
// For each item in the current drag session, finds the first registered
// custom handler whose data flavor the session supports, extracts the
// dropped nsIFile, and invokes the handler.  Stops consulting further
// handlers for an item once one returns a truthy value.
config.macros.fileDrop.dragDropHandler = function(evt) {

netscape.security.PrivilegeManager.enablePrivilege('UniversalXPConnect');
// Load in the native DragService manager from the browser.
var dragService = Components.classes["@mozilla.org/widget/dragservice;1"].getService(Components.interfaces.nsIDragService);

// Load in the currently-executing Drag/drop session.
var dragSession = dragService.getCurrentSession();

// Create an instance of an nsITransferable object using reflection.
var transferObject = Components.classes["@mozilla.org/widget/transferable;1"].createInstance();

// Bind the object explicitly to the nsITransferable interface. We need to do this to ensure that
// methods and properties are present and work as expected later on.
transferObject = transferObject.QueryInterface(Components.interfaces.nsITransferable);

// I've chosen to add only the x-moz-file MIME type. Any type can be added, and the data for that format
// NOTE(review): the comment above refers to adding a MIME type, but no
// transferObject.addDataFlavor(...) call follows -- that line appears to have
// been lost from this copy.  Without it, getTransferData below may fail.
// Confirm against the original FileDropPlugin source.

// Get the number of items currently being dropped in this drag/drop operation.
var numItems = dragSession.numDropItems;
for (var i = 0; i < numItems; i++)
{
// Get the data for the given drag item from the drag session into our prepared
// Transfer object.
dragSession.getData(transferObject, i);

// We need to pass in Javascript 'Object's to any XPConnect method which
// requires OUT parameters. The out value will then be saved as a new
// property called Object.value.
var dataObj = {};
var dropSizeObj = {};

// Walk the registered handlers in order; the first handler whose flavor is
// supported gets the file.  A truthy return value stops the chain (see the
// "Multiple File Dropping" note in the revision history above).
for(var ind = 0; ind < config.macros.fileDrop.customDropHandlers.length; ind++)
{
var item = config.macros.fileDrop.customDropHandlers[ind];
if(dragSession.isDataFlavorSupported(item.flavor))
{
transferObject.getTransferData(item.flavor, dataObj, dropSizeObj);
var droppedFile = dataObj.value.QueryInterface(Components.interfaces.nsIFile);
// Display all of the returned parameters with an Alert dialog.
// NOTE(review): the comment above is stale -- the code invokes the registered
// handler rather than showing an alert.
var result = item.handler.call(item,droppedFile);
// Since the event is handled, prevent it from going to a higher-level event handler.
evt.stopPropagation();
evt.preventDefault();
if(result){break;}
}
}
}
}

// Non-IE branch (window.event is IE-only): intended to register the drag/drop
// handler in capture phase.
if(!window.event)
{
// Register the event handler, and set the 'capture' flag to true so we get this event
// before it bubbles up through the browser.
// NOTE(review): this branch is empty -- the registration call itself
// (presumably window.addEventListener("dragdrop",
// config.macros.fileDrop.dragDropHandler, true)) appears to have been lost
// from this copy.  Confirm against the original FileDropPlugin source.
}

// NOTE(review): the enclosing function header (presumably
// config.macros.fileDrop.addCustomDropHandler = function (paramflavor, func, inFront))
// appears to have been lost from this copy, leaving a bare block that
// references paramflavor/func/inFront.  Confirm against the original plugin.
{
// Package the flavor and handler into a record for the dispatch loop above.
var obj = {};
obj.flavor = paramflavor;
obj.handler = func;
if(!inFront)
{config.macros.fileDrop.customDropHandlers.push(obj);}
// BUG FIX: the original called shift(obj).  Array.prototype.shift takes no
// argument -- it would *remove* the first registered handler and never add the
// new one.  unshift inserts at the front, which is the intended "inFront"
// behavior.
else{config.macros.fileDrop.customDropHandlers.unshift(obj);}
}
//}}}
//{{{
// NOTE(review): fragment -- this matches the body of the "Example Handler
// without popups" shown in the documentation above.  Its enclosing
// registration call (the function header and the call that the stray "})"
// below closes) appears to have been lost from this copy; as written this
// does not parse.  Confirm against the original.
{
var newDate = new Date();
story.displayTiddler(null,nsiFile.path)
return true;
})
//}}}

Gamble everything for love,
if you're a true human being.
If not, leave this gathering.
- Rumi
/***
|Name|HaloscanMacro|
|Created by|JimSpeth|
|Location|http://end.com/~speth/HaloscanMacro.html|
|Version|1.1.0|
|Requires|~TW2.x|

!Description
Comment and trackback support for TiddlyWiki (via Haloscan).

!History
* 16-Feb-06, version 1.1.0, drastic changes, now uses settings from haloscan account config
* 31-Jan-06, version 1.0.1, fixed display of counts for default tiddlers
* 30-Jan-06, version 1.0, initial release

!Examples
|!Source|!Output|h

!Installation
Register for a [[Haloscan|http://www.haloscan.com]] account.  It's free and painless.
Install the HaloscanMacro in a new tiddler with a tag of systemConfig (save and reload to activate).
In the macro configuration code (below), change //YourName// to your Haloscan account name.
Use the macro somewhere in a tiddler (see ViewTemplate for an example).

!Settings
You can adjust various options for your account in the member configuration area of Haloscan's web site.  The macro will use these settings when formatting the links.

!Code
***/
//{{{

/* Set account to your Haloscan account name and idPrefix to a prefix unique to TiddlyWiki for this account. */
// baseURL is the prefix for the Haloscan loader script URL (account name is
// appended to it where the <script> element is built below).
config.macros.haloscan = {account: "conal", idPrefix: "Conal's Journal", baseURL: "http://www.haloscan.com/load/"};

// NOTE(review): the function header for this block appears to have been lost
// from this copy -- as written, a bare "return;" outside a function is a
// syntax error.  Confirm against the published HaloscanMacro.
{
// Early return: everything below is unreachable, so the dynamic injection of
// the Haloscan loader script appears to be deliberately (or accidentally)
// disabled -- TODO confirm intent.
return;

account = config.macros.haloscan.account;
if (!account || (account == "YourName"))
account = store.getTiddlerText("SiteTitle");

// Build a <script> element pointing at the Haloscan loader for this account
// and append it to the document head.
var el = document.createElement('script');
el.language = 'JavaScript';
el.type = 'text/javascript';
el.src = config.macros.haloscan.baseURL + account;
document.documentElement.childNodes[0].appendChild(el);

}

// document.write replacement: TiddlyWiki renders tiddlers dynamically, so a
// real document.write would replace the whole page.  This shim just records
// the written text on document.written and hands it back to the caller.
// (Original author's note: "this totally clobbers document.write, i hope
// that's ok".)
function safeWrite(s) {
	document.written = s;
	return s;
}
document.write = safeWrite;

// Re-render the tiddlers listed in DefaultTiddlers (e.g. so Haloscan comment
// counts can appear once the remote script has loaded).
config.macros.haloscan.refreshDefaultTiddlers = function ()
{
var start = store.getTiddlerText("DefaultTiddlers");
if (start)
{
// NOTE(review): "titles" is never defined in this copy -- the line that split
// "start" into an array of titles appears to have been lost, so as written
// this loop throws a ReferenceError.  Confirm against the published macro.
for (var t=titles.length-1; t>=0; t--)
story.refreshTiddler(titles[t], DEFAULT_VIEW_TEMPLATE, 1);
}
}

// One-shot flag: set once the deferred refresh of DefaultTiddlers has been scheduled.
var haloscanRefreshed = 0;
// Macro entry point: renders a Haloscan comments/trackbacks link for the
// containing tiddler.  params[0] selects the link type; exactly one parameter
// is expected.
config.macros.haloscan.handler = function (place, macroName, params, wikifier, paramString, tiddler)
{
// Remote Haloscan script not loaded yet: schedule a single deferred refresh
// and bail out for now.
if (typeof HaloScan == 'undefined')
{
if (haloscanRefreshed == 0)
{
setTimeout("config.macros.haloscan.refreshDefaultTiddlers()", 1);
haloscanRefreshed = 1;
}
return;
}

// Derive the comment id from the tiddler's DOM id (substr(7) strips the
// "tiddler" prefix TiddlyWiki puts on element ids).
var id = story.findContainingTiddler(place).id.substr(7);
// conal: commenting out the next two lines makes comment-finding work when the tiddler title contains hyphens.  The
// Haloscan and HaloscanTB functions already do this replacement.  I don't know why removing the redundant replace helps.
// var hs_search = new RegExp('\\W','gi');
// id = id.replace(hs_search,"_");
// conal: Prepend idPrefix to make sure different TWs get different comment ids.
id = config.macros.haloscan.idPrefix + ' tiddler ' + id

account = config.macros.haloscan.account;
if (!account || (account == "YourName"))
account = store.getTiddlerText("SiteTitle");

// Report a macro error inline in the tiddler.
var haloscanError = function (msg)
{
createTiddlyError(place, config.messages.macroError.format(["HaloscanMacro"]), config.messages.macroErrorDetails.format(["HaloscanMacro", msg]));
}

// NOTE(review): the two bare blocks below have lost their conditions
// (presumably if (params[0] == "comments") ... else if (params[0] ==
// "trackbacks") ...) and the createTiddlyButton/link-building calls, and the
// final "else" has lost its statement -- as written this does not parse.
// Confirm against the published HaloscanMacro before relying on this code.
if (params.length == 1)
{
{
postCount(id);
var commentsHandler = function(e) { HaloScan(id); return false; };
}
{
postCountTB(id);
var trackbacksHandler = function(e) { HaloScanTB(id); return false; };
}
else
haloscanError("unknown parameter: " + params[0]);
}
else if (params.length == 0)
haloscanError("missing parameter");
else
}

//}}}
Features:
* Easy to use & customize
* Elegant code / easy to extend
* Time- and space-efficient (important for high-traffic sites)
* Highly detailed -- no information loss
Competitors:
* [[AWStats|http://awstats.sourceforge.net]] (Perl)
* [[Analog|http://www.analog.cx]] (C)
/***
***/
/*{{{*/

/* NOTE(review): the 15.5em right margin reserves room for the sidebar; the
   trailing comment "use the full horizontal width" looks stale or copied from
   a different setting -- confirm intent. */
#displayArea { margin: 1em 15.5em 0em 1em; } /* use the full horizontal width */

/*}}}*/
/***
|''Name:''|InlineJavascriptPlugin|
|''Source:''|http://www.TiddlyTools.com/#InlineJavascriptPlugin|
|''Author:''|Eric Shulman - ELS Design Studios|
|''~CoreVersion:''|2.0.10|

Insert Javascript executable code directly into your tiddler content.  Lets you ''call directly into TW core utility routines, define new functions, calculate values, add dynamically-generated TiddlyWiki-formatted output'' into tiddler content, or perform any other programmatic actions each time the tiddler is rendered.
!!!!!Usage
<<<
When installed, this plugin adds new wiki syntax for surrounding tiddler content with {{{<script>}}} and {{{</script>}}} markers, so that it can be treated as embedded javascript and executed each time the tiddler is rendered.

''Deferred execution from an 'onClick' link''
By including a label="..." parameter in the initial {{{<script>}}} marker, the plugin will create a link to an 'onclick' script that will only be executed when that specific link is clicked, rather than running the script each time the tiddler is rendered.

''External script source files:''
You can also load javascript from an external source URL, by including a src="..." parameter in the initial {{{<script>}}} marker (e.g., {{{<script src="demo.js"></script>}}}).  This is particularly useful when incorporating third-party javascript libraries for use in custom extensions and plugins.  The 'foreign' javascript code remains isolated in a separate file that can be easily replaced whenever an updated library file becomes available.

''Display script source in tiddler output''
By including the keyword parameter "show", in the initial {{{<script>}}} marker, the plugin will include the script source code in the output that it displays in the tiddler.

''Defining javascript functions and libraries:''
Although the external javascript file is loaded while the tiddler content is being rendered, any functions it defines will not be available for use until //after// the rendering has been completed.  Thus, you cannot load a library and //immediately// use it's functions within the same tiddler.  However, once that tiddler has been loaded, the library functions can be freely used in any tiddler (even the one in which it was initially loaded).

To ensure that your javascript functions are always available when needed, you should load the libraries from a tiddler that will be rendered as soon as your TiddlyWiki document is opened.  For example, you could put your {{{<script src="..."></script>}}} syntax into a tiddler called LoadScripts, and then add {{{<<tiddler LoadScripts>>}}} in your MainMenu tiddler.

Since the MainMenu is always rendered immediately upon opening your document, the library will always be loaded before any other tiddlers that rely upon the functions it defines.  Loading an external javascript library does not produce any direct output in the tiddler, so these definitions should have no impact on the appearance of your MainMenu.

''Creating dynamic tiddler content''
An important difference between this implementation of embedded scripting and conventional embedded javascript techniques for web pages is the method used to produce output that is dynamically inserted into the document:
* In a typical web document, you use the document.write() function to output text sequences (often containing HTML tags) that are then rendered when the entire document is first loaded into the browser window.
* However, in a ~TiddlyWiki document, tiddlers (and other DOM elements) are created, deleted, and rendered "on-the-fly", so writing directly to the global 'document' object does not produce the results you want (i.e., replacing the embedded script within the tiddler content), and completely replaces the entire ~TiddlyWiki document in your browser window.
* To allow these scripts to work unmodified, the plugin automatically converts all occurrences of document.write() so that the output is inserted into the tiddler content instead of replacing the entire ~TiddlyWiki document.

If your script does not use document.write() to create dynamically embedded content within a tiddler, your javascript can, as an alternative, explicitly return a text value that the plugin can then pass through the wikify() rendering engine to insert into the tiddler display.  For example, using {{{return "thistext"}}} will produce the same output as {{{document.write("thistext")}}}.

//Note: your script code is automatically 'wrapped' inside a function, {{{_out()}}}, so that any return value you provide can be correctly handled by the plugin and inserted into the tiddler.  To avoid unpredictable results (and possibly fatal execution errors), this function should never be redefined or called from ''within'' your script code.//

''Accessing the ~TiddlyWiki DOM''
The plugin provides one pre-defined variable, 'place', that is passed in to your javascript code so that it can have direct access to the containing DOM element into which the tiddler output is currently being rendered.

Access to this DOM element allows you to create scripts that can:
* vary their actions based upon the specific location in which they are embedded
* access 'tiddler-relative' information (use findContainingTiddler(place))
* perform direct DOM manipulations (when returning wikified text is not enough)
<<<
!!!!!Examples
<<<
><script show>
alert('InlineJavascriptPlugin: this is a demonstration message');
</script>
dynamic output:
><script show>
return (new Date()).toString();
</script>
wikified dynamic output:
><script show>
</script>
dynamic output using 'place' to get size information for current tiddler:
><script show>
if (!window.story) window.story=window;
var title=story.findContainingTiddler(place).id.substr(7);
return title+" is using "+store.getTiddlerText(title).length+" bytes";
</script>
creating an 'onclick' button/link that runs a script:
if (!window.story) window.story=window;
</script>
>http://www.TiddlyTools.com/demo.js contains:
>>{{{function demo() { alert('this output is from demo(), defined in demo.js') } }}}
><script src="demo.js" show>
</script>
><script label="click to execute demo() function" show>
demo()
</script>
<<<
!!!!!Installation
<<<
import (or copy/paste) the following tiddlers into your document:
''InlineJavascriptPlugin'' (tagged with <<tag systemConfig>>)
<<<
!!!!!Revision History
<<<
''2006.10.16 [1.5.2]'' add newline before closing '}' in 'function out_' wrapper.  Fixes error caused when last line of script is a comment.
''2006.06.01 [1.5.1]'' when calling wikify() on script return value, pass highlightRegExp and tiddler params so macros that rely on these values can render properly
''2006.04.19 [1.5.0]'' added 'show' parameter to force display of javascript source code in tiddler output
''2006.01.05 [1.4.0]'' added support 'onclick' scripts.  When label="..." param is present, a button/link is created using the indicated label text, and the script is only executed when the button/link is clicked.  'place' value is set to match the clicked button/link element.
''2005.12.13 [1.3.1]'' when catching eval error in IE, e.description contains the error text, instead of e.toString().  Fixed error reporting so IE shows the correct response text.  Based on a suggestion by UdoBorkowski
''2005.11.09 [1.3.0]'' for 'inline' scripts (i.e., not scripts loaded with src="..."), automatically replace calls to 'document.write()' with 'place.innerHTML+=' so script output is directed into tiddler content.  Based on a suggestion by BradleyMeck
''2005.11.08 [1.2.0]'' handle loading of javascript from an external URL via src="..." syntax
''2005.11.08 [1.1.0]'' pass 'place' param into scripts to provide direct DOM access
''2005.11.08 [1.0.0]'' initial release
<<<
!!!!!Credits
<<<
This feature was developed by EricShulman from [[ELS Design Studios|http://www.elsdesign.com]]
<<<
!!!!!Code
***/
//{{{
// Version record for the InlineJavascriptPlugin wiki formatter.
version.extensions.inlineJavascript= {major: 1, minor: 5, revision: 2, date: new Date(2006,10,16)};

// Wiki formatter that recognizes <script>...</script> blocks in tiddler text
// and either loads an external script (src="..."), displays the source
// (show), defers execution behind a link (label="..."), or evals the code
// inline.
config.formatters.push( {
name: "inlineJavascript",
match: "\\<script",

handler: function(w) {
// NOTE(review): the published plugin defines a "lookahead" regex property and
// assigns lookaheadMatch from it at the top of this handler; those lines (and
// the src/label branch structure) appear to have been lost from this copy, so
// lookaheadMatch below is undefined and the braces do not balance.
// Reinstall from the TiddlyTools source rather than editing this fragment.
// make script tag, set src, add to body to execute, then remove for cleanup
var script = document.createElement("script"); script.src = lookaheadMatch[1];
document.body.appendChild(script); document.body.removeChild(script);
}
if (lookaheadMatch[4]) { // there is script code
if (lookaheadMatch[3]) // show inline script code in tiddler output
}
else { // run inline script code
code=code.replace(/document.write/gi,'place.innerHTML+=('); try { var out = eval(code); } catch(e) { out = e.description?e.description:e.toString(); } if (out && out.length) wikify(out,w.output,w.highlightRegExp,w.tiddler); } } w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length; } } } ) //}}} * [[Reactable demos| http://www.youtube.com/profile_videos?user=marcosalonso]] * [[Multi-Touch Interaction Research, Jeff Han| http://cs.nyu.edu/~jhan/ftirtouch/index.html]] Some [[plugin]]s cannot load until other plugins have loaded. There's no TiddlyWiki mechanism to record such dependencies. Tiddlers are loaded in lexical order of their names, so the thing to do is make sure that I use a name that comes //after// the names of any load-time dependencies. There is a [[discussion thread| http://groups-beta.google.com/group/TiddlyWiki/browse_frm/thread/9f5996eefa082d29]] on the TiddlyWiki group of mechanisms for expressing and managing dependencies. LoadDependencies are usually also UseDependencies, so for brevity I don't repeat LoadDependencies in a UseDependencies list. Examples: [[SyntaxifyPlugin: Haskell]], [[RewritePlugin: Haskell]]. [[site map]] <<tag project>> <<tag idea>> <<tag article>> <<tag "how to">> <<tag {{new Date().formatString("YYYY-0MM")}}>> [[sand box]] <<newTiddler>> <<newJournal 'YYYY-0MM-0DD' {{new Date().formatString("YYYY-0MM")}} 'day'>> <<tiddler ToggleRightSidebar>> <!--{{{--> <style type="text/css"> #contentWrapper {display:none;} body { background:#003; } </style> <div id="SplashScreen" style="border: 1px solid #ccc; -moz-border-radius:1em; display:block; text-align:center; width:400px; margin:100px auto; padding:.5em; color:#fff; font-size:24pt; font-family:verdana,arial,helvetica,sans; background-color:#006;">Conal's Journal is loading<div style="font-size: 18px; color:#fff;">please wait...</div></div> <!--}}}--> This section contains a ''large'' test of the [[AudioPlayerPlugin]] macros. 
It loads a lot of mp3 files at once and swamps the browser. See comment at [[2006-11-15]]. The audio clips come from the [[cnvc sound bytes page|http://www.cnvc.org/sbytes.htm]]. | searching for everyone's need |<<marshallism 200311>>| | jackal postal delivery |<<marshallism 200310>>| | how we experience needs |<<marshallism 200309>>| | how the past affects |<<marshallism 200308>>| | how Marshall sees our needs |<<marshallism 200307>>| | bullshit in giraffe |<<marshallism 200306>>| | beauty behind the judgment |<<marshallism 200305>>| | act self-fully |<<marshallism 200304>>| | needs as a gift example |<<marshallism 200303>>| | when your needs are heard |<<marshallism 200302>>| | what is a need? |<<marshallism 200301>>| | the more ugly sounding ... |<<marshallism 200212>>| | self full motivation |<<marshallism 200211>>| | how the past effects |<<marshallism 200210>>| | a need to Marshall |<<marshallism 20021004>>| | when you hear a demand |<<marshallism 20020927>>| | see your gift in their eyes |<<marshallism 20020920>>| | no but your needs matter |<<marshallism 20020913>>| | greatest menace on earth |<<marshallism 20020906>>| | bullshit in giraffe |<<marshallism 20020830>>| | beauty behind the judgment |<<marshallism 20020823>>| | act self-fully |<<marshallism 20020816>>| | what a request contains |<<marshallism 20020809>>| | do as I've requested only |<<marshallism 20020802>>| | to think you know what's right |<<marshallism 20020728>>| | the cause of male-itis |<<marshallism 20020721>>| | never let someone say what you are |<<marshallism 20020714>>| | never hear a jackal's thoughts |<<marshallism 20020707>>| | if you hear any rejection . . . |<<marshallism 20020626>>| | gratitude tells us . . . 
|<<marshallism 20020619>>| | a "no" is a need and request |<<marshallism 20020612>>| | a "no" is a gift |<<marshallism 20020605>>| | role play request example |<<marshallism 20020531>>| | "yes that isn't so" detector |<<marshallism 20020524>>| | we have a choice |<<marshallism 20020517>>| | to think in enemy images |<<marshallism 20020510>>| | love as a need |<<marshallism 20020503>>| | need focus |<<marshallism 20020426>>| | the need behind the "no" |<<marshallism 20020419>>| | focus on needs |<<marshallism 20020412>>| | every diagnosis of others |<<marshallism 20020405>>| | different needs, is never |<<marshallism 20020329>>| | what a request contains |<<marshallism 20020322>>| | the cost of hearing rejection |<<marshallism 20020315>>| | connected at heart level |<<marshallism 20020308>>| | thoughts that imply wrong |<<marshallism 20020301>>| | strategies differ, not needs |<<marshallism 20020222>>| | the cost of hearing rejection |<<marshallism 20020215>>| | a need contains no reference to a specific person |<<marshallism 20020104>>| | all humans have same needs |<<marshallism 20011228>>| | a hug is a mug when . . . |<<marshallism 20011221>>| | needs can be met many ways |<<marshallism 20011214>>| | we have choice and power |<<marshallism 20011207>>| | what you say is NOT empathy! |<<marshallism 20011130>>| | false needs/pseudo needs vs. 
the real stuff |<<marshallism 20011123>>| | the difference between mourning &amp; apology |<<marshallism 20011116>>| | mixing up needs and preferences |<<marshallism 20011109>>| | we never do anything wrong |<<marshallism 20011102>>| | when no request is made |<<marshallism 20011026>>| | we never know what we want |<<marshallism 20011019>>| | most important need |<<marshallism 20011012>>| /*** |Name|MoveablePanelPlugin| |Source|http://www.TiddlyTools.com/#MoveablePanelPlugin| |Version|1.3.4| |Author|Eric Shulman - ELS Design Studios| |License|http://www.TiddlyTools.com/#LegalStatements <<br>>and [[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]| |~CoreVersion|2.1| |Type|plugin| |Requires|NestedSlidersPlugin| |Overrides|| |Description|| Add move, size, max/restore mouse event handling and fold/unfold, hover/scroll, and close/dock toolbar command items to any floating panel or tiddler. (see NestedSlidersPlugin for floating panel syntax/usage). !!!!!Usage <<< syntax: {{{<<moveablePanel>>}}} example: //using NestedSlidersPlugin 'floating panel' syntax// //{{{ +++^30em^[panel]<<moveablePanel>>this is a headline for the panel ---- this is a moveable floating panel with a few lines of text as an example for you to try... //note: this line is really long so you can see what happens to word wrapping when you re-size this panel// === //}}} Try it: +++^30em^[panel]<<moveablePanel>>this is a headline for the panel ---- this is a moveable floating panel with a few lines of text as an example for you to try... //note: this line is really long so you can see what happens to word wrapping when you re-size this panel// === When the mouse is just inside the edges of the tiddler/panel, the cursor will change to a "crossed-arrows" symbol, indicating that the panel is "moveable". Grab (click-hold) the panel anywhere in the edge area and then drag the mouse to reposition the panel. 
To resize the panel, hold the ''shift'' key and then grab the panel: the cursor will change to a "double-arrow" symbol. Drag a side edge of the panel to stretch horizontally or vertically, or drag a corner of the panel to stretch in both dimensions at once. Double-clicking anywhere in the edge area of a panel will 'maximize' it to fit the current browser window. When the mouse is anywhere over a panel (not just near the edge), a 'toolbar menu' appears in the ''upper right corner'', with the following command items: *fold/unfold: ''fold'' temporarily reduces the panel height to show just one line of text. ''unfold'' restores the panel height. *hover/scroll: when you scroll the browser window, the moveable panels scroll with it. ''hover'' lets you keep a panel in view, while the rest of the page content moves in the window. ''scroll'' restores the default scrolling behavior for the panel. //Note: Due to browser limitations, this feature is not currently available when using Internet Explorer (v6 or lower)... sorry.// *close: ''close'' hides a panel from the page display. If you have moved/resized a panel, closing it restores its default position and size. *dock: unlike a floating panel, a moveable //tiddler// does not "float" on the page until it has actually been moved from its default position. When moving a tiddler, the ''close'' command is replaced with ''dock'', which restores the tiddler to its default //non-floating// location on the page. <<< !!!!!Installation <<< import (or copy/paste) the following tiddlers into your document: ''MoveablePanelPlugin'' (tagged with <<tag systemConfig>>) Note: for compatibility, please also install the current version of ''NestedSlidersPlugin''. <<< !!!!!Revision History <<< ''2006.10.17 [1.3.4]'' when moving panel, adjust position for relative containing DIV ''2006.05.25 [1.3.3]'' in closePanel(), use p.button.onclick() so that normal processing (updating slider button tooltip, access key, etc.) 
is performed ''2006.05.11 [1.3.2]'' doc update ''2006.05.11 [1.3.1]'' re-define all functions within moveablePanel object (eliminate global window.* function definitions (and some "leaky closures" in IE) ''2006.05.11 [1.3.0]'' converted from inline javascript to true plugin ''2006.05.09 [1.2.3]'' in closePanel(), set focus to sliderpanel button (if any) ''2006.05.02 [1.2.2]'' in MoveOrSizePanel(), calculate adjustments for top and left when inside nested floating panels ''2006.04.06 [1.2.1]'' in getPanel(), allow redefinition or bypass of "moveable" tag (changed from hard-coded "tearoff") ''2006.03.29 [1.2.0]'' in getPanel(), require "tearoff" tag to enable floating tiddlers ''2006.03.13 [1.1.0]'' added handling for floating tiddlers and conditional menu display ''2006.03.06 [1.0.2]'' set move or resize cursor during mousetracking ''2006.03.05 [1.0.1]'' use "window" vs "document.body" so mousetracking in FF doesn't drop the panel when moving too quickly ''2006.03.04 [1.0.0]'' Initial public release <<< !!!!!Credits <<< This feature was developed by EricShulman from [[ELS Design Studios|http:/www.elsdesign.com]] <<< !!!!!Code ***/ //{{{ version.extensions.moveablePanel= {major: 1, minor: 3, revision: 4, date: new Date(2006,10,17)}; //}}} //{{{ config.macros.moveablePanel= { handler: function(place,macroName,params) { var p=this.getPanel(place); if (!p) return; // remember original panel event handlers, size, location, border if (!p.saved) p.saved= { mouseover: p.onmouseover, mouseout: p.onmouseout, dblclick: p.ondblclick, top: p.style.top, left: p.style.left, width: p.style.width, height: p.style.height, position: p.style.position, border: p.style.border }; // create control menu items var menupos=p.className=="floatingPanel"?"float:right;":"position:absolute;right:2em;top:3em;"; var menustyle=p.className!="floatingPanel"?'style="border:1px solid #666;background:#ccc;color:#000;padding:0px .5em;"':""; var html='<div 
style="font-size:7pt;display:none;'+menupos+'">&nbsp;'; if (p.className=="floatingPanel") html+='<a href="javascript:;" title="reduce panel size" '+menustyle +' onclick="return config.macros.moveablePanel.foldPanel(this,event)">fold</a>&nbsp; '; if (!config.browser.isIE) html+='<a href="javascript:;" title="keep panel in view when scrolling"'+menustyle +' onclick="return config.macros.moveablePanel.hoverPanel(this,event)">hover</a>&nbsp; '; if (p.className=="floatingPanel") html+='<a href="javascript:;" title="close panel and reset to default size and position"'+menustyle +' onclick="return config.macros.moveablePanel.closePanel(this,event)">close</a>'; else html+='<a href="javascript:;" title="reset panel to default size and position"'+menustyle +' onclick="return config.macros.moveablePanel.closePanel(this,event)">dock</a>'; html+='</div>'; p.menudiv=createTiddlyElement(place,"span"); p.menudiv.innerHTML=html; // init mouse handling and tooltip p.title="drag edge to move, shift key=stretch, double-click=max/restore"; p.onmouseover=function(event) { if (this.className=="floatingPanel"||this.style.position=="absolute"||this.style.position=="fixed") { if (this.className!="floatingPanel") this.style.border="1px dotted #999"; // border around tiddler this.menudiv.firstChild.style.display="inline"; } if (this.saved.mouseover) return this.saved.mouseover(event); }; p.onmouseout=function(event) { this.menudiv.firstChild.style.display="none"; if (this.className!="floatingPanel") this.style.border=this.saved.border; if (this.saved.mouseout) return this.saved.mouseout(event); }; p.ondblclick=function(event) { if (!config.macros.moveablePanel.maximizePanel(this,event)) return false; // processed return this.saved.dblclick?this.saved.dblclick(event):true; }; p.onmousemove=function(event) { return config.macros.moveablePanel.setCursorPanel(this,event); }; p.onmousedown=function(event) { return config.macros.moveablePanel.moveOrSizePanel(this,event); }; }, getPanel: 
function(place) { var p=place; while (p && p.className!='floatingPanel') p=p.parentNode; if (p) return p; // floatingPanel p=story.findContainingTiddler(place); if (!p || !store.getTiddler(p.getAttribute("tiddler"))) return null; // not in a tiddler // moveable **tiddlers** in IE have LOTS of problems... DISABLED FOR NOW... but floating panels still work in IE if (config.browser.isIE) return null; // tiddlers tagged (e.g. with "moveable") to allow movement? use null or "" to bypass tag check var tag="moveable"; if (!tag || !tag.trim().length) return p; return (store.getTiddler(p.getAttribute("tiddler")).tags.find(tag)!=null)?p:null; // tiddler is tagged for moving }, processed: function(event) { event.cancelBubble=true; if (event.stopPropagation) event.stopPropagation(); return false; }, getClientWidth: function() { if(document.width!=undefined) return document.width; if(document.documentElement && document.documentElement.clientWidth) return document.documentElement.clientWidth; if(document.body && document.body.clientWidth) return document.body.clientWidth; if(window.innerWidth!=undefined) return window.innerWidth; return 100; // should never get here }, closePanel: function(place,event) { if (!event) var event=window.event; var p=this.getPanel(place); if (!p) return true; if (p.hover) this.hoverPanel(p.hoverButton,event); if (p.folded) this.foldPanel(p.foldButton,event); p.maxed=false; p.style.top=p.saved.top; p.style.left=p.saved.left; p.style.width=p.saved.width; p.style.height=p.saved.height; p.style.position=p.saved.position; if (p.button) { p.button.focus(); onClickNestedSlider({target:p.button}); } // click on slider "button" (if any) to close the panel return this.processed(event); }, foldPanel: function(place,event) { if (!event) var event=window.event; var p=this.getPanel(place); if (!p) return true; if (!p.foldButton) p.foldButton=place; if (p.folded) { p.style.height=p.folded_savedheight; p.style.overflow=p.folded_savedoverflow; } else { 
p.folded_savedheight=p.style.height; p.style.height="1em"; p.folded_savedoverflow=p.style.overflow; p.style.overflow="hidden"; } p.folded=!p.folded; place.innerHTML=p.folded?"unfold":"fold"; place.title=p.folded?"restore panel size":"reduce panel size"; return this.processed(event); }, hoverPanel: function(place,event) { if (config.browser.isIE) { return this.processed(event); } // 'fixed' position is not handled properly by IE :-( if (!event) var event=window.event; var p=this.getPanel(place); if (!p) return true; if (!p.hoverButton) p.hoverButton=place; if (p.hover) p.style.position=p.hover_savedposition; else { p.hover_savedposition=p.style.position; p.style.position="fixed"; } p.hover=!p.hover; place.innerHTML=p.hover?"scroll":"hover"; place.title=p.hover?"make panel move with page when scrolling":"keep panel in view when scrolling page"; return this.processed(event); }, maximizePanel: function(place,event) { if (!event) var event=window.event; var p=this.getPanel(place); if (!p) return true; var left=findPosX(p); var top=findPosY(p); var width=p.offsetWidth; var height=p.offsetHeight; var x=!config.browser.isIE?event.pageX:event.clientX; var y=!config.browser.isIE?event.pageY:event.clientY; if (x<left||x>=left+width||y<top||y>=top+height) return true; // not inside panel, let mousedown bubble through var edgeWidth=10; var edgeHeight=10; var isTop=(y-top<edgeHeight); var isLeft=(x-left<edgeWidth); var isBottom=(top+height-y<edgeHeight); var isRight=(left+width-x<edgeWidth); if (!(isTop||isLeft||isBottom||isRight)) return true; // not near an edge... 
let double click bubble through if (p.folded) this.foldPanel(p.foldButton,event); // unfold panel first (if needed) if (p.maxed) { p.style.top=p.max_savedtop; p.style.left=p.max_savedleft; p.style.width=p.max_savedwidth; p.style.height=p.max_savedheight; p.style.position=p.max_savedposition; } else { p.max_savedwidth=p.style.width; p.max_savedheight=p.style.height; p.max_savedtop=p.style.top; p.max_savedleft=p.style.left; p.max_savedposition=p.style.position; // IE gets the percentage stretch wrong if floating panel is inside a table p.style.width=config.browser.isIE?(getClientWidth()*0.95+"px"):"95%"; p.style.height="95%"; p.style.top=p.style.left='1em'; p.style.position="absolute"; } p.maxed=!p.maxed; return this.processed(event); }, setCursorPanel: function(place,event) { if (!event) var event=window.event; var p=this.getPanel(place); if (!p) return true; var left=findPosX(p); var top=findPosY(p); var width=p.offsetWidth; var height=p.offsetHeight; var x=!config.browser.isIE?event.pageX:event.clientX; var y=!config.browser.isIE?event.pageY:event.clientY; if (x<left||x>=left+width||y<top||y>=top+height) return true; // not inside panel, let mousedown bubble through var edgeWidth=10; var edgeHeight=10; var isTop=(y-top<edgeHeight); var isLeft=(x-left<edgeWidth); var isBottom=(top+height-y<edgeHeight); var isRight=(left+width-x<edgeWidth); if (!(isTop||isLeft||isBottom||isRight)) { p.style.cursor="auto"; if (!p.savedtitle) p.savedtitle=p.title; p.title=""; } else { p.style.cursor=!event.shiftKey?"move":((isTop?'n':(isBottom?'s':''))+(isLeft?'w':(isRight?'e':''))+'-resize'); if (p.savedtitle) p.title=p.savedtitle; } return true; // let mouseover event bubble through }, moveOrSizePanel: function(place,event) { if (!event) var event=window.event; var p=this.getPanel(place); if (!p) return true; var left=findPosX(p); var top=findPosY(p); var width=p.offsetWidth; var height=p.offsetHeight; var x=!config.browser.isIE?event.pageX:event.clientX; var 
y=!config.browser.isIE?event.pageY:event.clientY; if (x<left||x>=left+width||y<top||y>=top+height) return true; // not inside panel, let mousedown bubble through var edgeWidth=10; var edgeHeight=10; var isTop=(y-top<edgeHeight); var isLeft=(x-left<edgeWidth); var isBottom=(top+height-y<edgeHeight); var isRight=(left+width-x<edgeWidth); if (!(isTop||isLeft||isBottom||isRight)) return true; // not near an edge... let mousedown bubble through // when resizing, change cursor to show directional (NSEW) "drag arrows" var sizing=event.shiftKey; // remember this for use during mousemove tracking if (sizing) p.style.cursor=((isTop?'n':(isBottom?'s':''))+(isLeft?'w':(isRight?'e':''))+'-resize'); var adjustLeft=0; var adjustTop=0; var pp=p.parentNode; while (pp && pp.style.position!='relative') pp=parent.parentNode; if (pp) { adjustLeft+=findPosX(pp); adjustTop+=findPosY(pp); } var pp=p.parentNode; while (pp && pp.className!="floatingPanel") pp=pp.parentNode; if (pp) { adjustLeft+=findPosX(pp); adjustTop+=findPosY(pp); } // start tracking mousemove events config.macros.moveablePanel.activepanel=p; var target=p; // if 'capture' handling not supported, track within panel only if (document.body.setCapture) { document.body.setCapture(); var target=document.body; } // IE if (window.captureEvents) { window.captureEvents(Event.MouseMove|Event.MouseUp,true); var target=window; } // moz if (target.onmousemove!=undefined) target.saved_mousemove=target.onmousemove; target.onmousemove=function(e){ if (!e) var e=window.event; var p=config.macros.moveablePanel.activepanel; if (!p) { this.onmousemove=this.saved_mousemove?this.saved_mousemove:null; return; } // PROBLEM: p.offsetWidth and p.offsetHeight do not seem to account for padding or borders // WORKAROUND: subtract padding and border (in px) when calculating new panel width and height // TBD: get these values from p.style... convert to px as needed. 
var paddingWidth=10.6667; var paddingHeight=10.6667; var borderWidth=1; var borderHeight=1; var adjustWidth=-(paddingWidth*2+borderWidth*2); var adjustHeight=-(paddingHeight*2+borderHeight*2); if (p.style.position!="absolute") { // convert relative DIV to movable absolute DIV p.style.position="absolute"; p.style.left=left+"px"; p.style.top=top+"px"; p.style.width=(width+adjustWidth)+"px"; p.style.top=(height+adjustHeight)+"px"; } var newX=!config.browser.isIE?e.pageX:e.clientX; var newY=!config.browser.isIE?e.pageY:e.clientY; if (sizing) { // resize panel // don't let panel get smaller than edge "grab" zones var minWidth=edgeWidth*2-adjustWidth; var minHeight=edgeHeight*2-adjustHeight; p.maxed=false; // make sure panel is not maximized if (p.folded) this.foldPanel(p.foldButton,e); // make sure panel is unfolded if (isBottom) var newHeight=height+newY-y+1; if (isTop) var newHeight=height-newY+y+1; if (isLeft) var newWidth=width-newX+x+1; if (isRight) var newWidth=width+newX-x+1; if (isLeft||isRight) p.style.width=(newWidth>minWidth?newWidth:minWidth)+adjustWidth+"px"; if (isLeft) p.style.left=left-adjustLeft+newX-x+1+"px"; if (isTop||isBottom) p.style.height=(newHeight>minHeight?newHeight:minHeight)+adjustHeight+"px"; if (isTop) p.style.top=top-adjustTop+newY-y+1+"px"; } else { // move panel p.style.top=top-adjustTop+newY-y+1+"px"; p.style.left=left-adjustLeft+newX-x+1+"px"; } var status=sizing?("size: "+p.style.width+","+p.style.height):("pos: "+p.style.left+","+p.style.top); window.status=status.replace(/(\.[0-9]+)|px/g,""); // remove decimals and "px" return config.macros.moveablePanel.processed(e); }; // stop tracking mousemove events if (target.onmouseup!=undefined) target.saved_mouseup=target.onmouseup; target.onmouseup=function(e){ if (!e) var e=window.event; if (this.releaseCapture) this.releaseCapture(); // IE if (this.releaseEvents) this.releaseEvents(Event.MouseMove|Event.MouseUp); // moz this.onmousemove=this.saved_mousemove?this.saved_mousemove:null; 
this.onmouseup=this.saved_mouseup?this.saved_mouseup:null; config.macros.moveablePanel.activepanel=null; window.status=""; return config.macros.moveablePanel.processed(e); }; return this.processed(event); // mousedown handled } }; //}}} I remember Marshall saying that one can tell from a language's culture whether that culture is violent, and specifically whether the language talks about //what people are//, not just what they //do//, //feel//, or //need//. Marshall also said: > Never let somebody in authority tell you what you are. <<marshallism 20020714>> I've been very uncomfortable with the culture of PSNCC, and lately I've been painfully sensitive to use of the word //trainer// to label what some people are or are not. Sorting out my reactions, I find the following: //(rest is notes)// * imprecision * implicitness of external validation/approval/evaluation * loss of convenient term for teaching NVC * labeling -- i want community support in walking our talk //Convenience//: what does it reveal that this culture wants validation/approval/evaluation to be more convenient thing to discuss than teaching NVC? /*** |au(#t(#h(#o(#r#)#)#)#):|Bradley Meck| |date:|12/03/2006| |use:|config.macros.nestedFormatter.getFormatter| |params:|String name, String openingMatch, String closingMatch, function Handler| !About This plugin's purpose is to produce a formatter that will allow for it to have nested structures. Included in this macro is an example at the bottom using parenthesis to give a font size increasing effect. !Example text source {{{ (#t(#e(#s#)t (#t(#hi#)s#)#)!#) }}} !Example result (#t(#e(#s#)t (#t(#hi#)s#)#)!#) See example code below. See also [[literate Haskell plugin]] for an example of a simplified formatter. The main difference in how the handler for such a handler function works for a nested formatter is that it has a second arguement {{{strArr}}}. 
{{{strArr}}} has 3 parts, the original tag {{{opening signifier strArr[0]}}} and the middle test {{{what is between the signifiers strArr[1]}}} and the ending tag {{{ending signifier strArr[2]}}}. w.matchText will return the entire string match from the first openning signifier to the last closing signifier {{{inclusive}}}. !Todo *restructure the code so that wikify is not called because it is such a heavy function call. *handle improper formatting nicely. *allow for capturing/bubbling style handler calls. !Changes |2006-12-30|[[Conal Elliott|http://conal.net]]|added {{{substAndWikify}}} and activated example.| !Code ***/ //{{{ config.macros.nestedFormatter = {}; config.macros.nestedFormatter.getFormatter = function(fname, openTag, closeTag, formattingHandler) { var formatterResult = {}; formatterResult.name = fname; formatterResult.match = openTag; formatterResult.openRegex = new RegExp(openTag,"m"); formatterResult.closeRegex = new RegExp(closeTag,"m"); formatterResult.handler = function(w) { var testString = w.source.substring(w.matchStart+w.matchLength); var strArr = [w.source.substring(w.matchStart,w.matchStart+w.matchLength)]; var depth = 1; var off = w.matchLength; var index = true;var endex = true; while (depth > 0 && (index || endex) && testString.length > 0){ index = this.openRegex.exec(testString); endex = this.closeRegex.exec(testString); //Found New Opening if (index && endex && index.index < endex.index) { depth++; off+=index.index+index[0].length; testString = testString.substring(index[0].length+index.index); } else if (!index || endex.index < index.index) { depth--; off+=endex.index+endex[0].length; testString = testString.substring(endex[0].length+endex.index); } } if (depth != 0) { createTiddlyText(w.output,w.matchText); } else { w.matchText = w.source.substring(w.matchStart,w.matchStart+off); strArr.push(w.matchText.substring(strArr[0].length,w.matchText.length-endex[0].length)); strArr.push(endex[0]); w.matchLength = w.matchText.length; 
w.nextMatch = w.matchStart + w.matchLength; formattingHandler(w,strArr); } }; return formatterResult; } // Here's a simplified variation that takes a string substitution function. config.macros.nestedFormatter.substAndWikify = function(fname, openTag, closeTag, stringFun) { return ( config.macros.nestedFormatter.getFormatter( fname,openTag,closeTag, function (w,s) { wikify(stringFun(s[1]),w.output,null,w.tiddler); })); }; // Example: config.formatters.push( config.macros.nestedFormatter.getFormatter("paren","\\(#","#\",function(w,s){
var elem = createTiddlyElement(w.output,"span")
wikify(s[1],elem,null,w.tiddler);
elem.style.fontSize = "120%";
}));

//}}}
/***
|''Name:''|NestedSlidersPlugin|
|''Source:''|http://www.TiddlyTools.com/#NestedSlidersPlugin|
|''Author:''|Eric Shulman - ELS Design Studios|
|''~CoreVersion:''|2.0.10|

Quickly make any tiddler content into an expandable 'slider' panel, without needing to create a separate tiddler to contain the slider content.  Optional syntax allows ''default to open'', ''custom button label/tooltip'' and ''automatic blockquote formatting.''

You can also 'nest' these sliders as deep as you like (see complex nesting example below), so that expandable 'tree-like' hierarchical displays can be created.  This is most useful when converting existing in-line text content to create in-line annotations, footnotes, context-sensitive help, or other subordinate information displays.

++++!!!!![Configuration]>
Debugging messages for 'lazy sliders' deferred rendering:
<<option chkDebugLazySliderDefer>> show debugging alert when deferring slider rendering
<<option chkDebugLazySliderRender>> show debugging alert when deferred slider is actually rendered
===
++++!!!!![Usage]>
When installed, this plugin adds new wiki syntax for embedding 'slider' panels directly into tiddler content.  Use {{{+++}}} and {{{===}}} to delimit the slider content.  Additional optional syntax elements let you specify
*default to open
*floater (with optional CSS width value)
*mouse auto rollover
*custom class/label/tooltip/accesskey
*automatic blockquote
*deferred rendering
The complete syntax, using all options, is:
//{{{
++++(cookiename)!!!^width^*{{class{[label=key|tooltip]}}}>...
content goes here
===
//}}}
where:
* {{{+++}}} (or {{{++++}}}) and {{{===}}}^^
marks the start and end of the slider definition, respectively.  When the extra {{{+}}} is used, the slider will be open when initially displayed.^^
* {{{(cookiename)}}}^^
saves the slider opened/closed state, and restores this state whenever the slider is re-rendered.^^
* {{{!}}} through {{{!!!!!}}}^^
displays the slider label using the corresponding TiddlyWiki heading style^^
* {{{^width^}}} (or just {{{^}}})^^
makes the slider 'float' on top of other content rather than shifting that content downward.  'width' must be a valid CSS value (e.g., "30em", "180px", "50%", etc.).  If omitted, the default width is "auto" (i.e., fit to content)^^
* {{{*}}}^^
automatically opens/closes slider on "rollover" as well as when clicked^^
* {{{{{class{[label=key|tooltip]}}}}}}^^
uses custom label/tooltip/accesskey.  {{{{{class{...}}}}}}, {{{=key}}} and {{{|tooltip}}} are optional.  'class' is any valid CSS class name, used to style the slider label text.  'key' must be a ''single letter only''.  Default labels/tooltips are: ">" (more) and "<" (less), with no default access key assignment.^^
* {{{">"}}} //(without the quotes)//^^
automatically adds blockquote formatting to slider content^^
* {{{"..."}}} //(without the quotes)//^^
defers rendering of closed sliders until the first time they are opened.  //Note: deferred rendering may produce unexpected results in some cases.  Use with care.//^^

//Note: to make slider definitions easier to read and recognize when editing a tiddler, newlines immediately following the {{{+++}}} 'start slider' or preceding the {{{===}}} 'end slider' sequence are automatically suppressed so that excess whitespace is eliminated from the output.//
===
++++!!!!![Examples]>
simple in-line slider:
{{{
+++
content
===
}}}
+++
content
===
----
use a custom label and tooltip:
{{{
+++[label|tooltip]
content
===
}}}
+++[label|tooltip]
content
===
----
content automatically blockquoted:
{{{
+++>
content
===
}}}
+++>
content
===
----
all options combined //(default open, cookie, heading, sized floater, rollover, class, label/tooltip/key, blockquoted, deferred)//
{{{
++++(testcookie)!!!^30em^*{{big{[label=Z|click or press Alt-Z to open]}}}>...
content
===
}}}
++++(testcookie)!!!^30em^*{{big{[label=Z|click or press Alt-Z to open]}}}>...
content
===
----
complex nesting example:
{{{
+++^[get info...=I|click for information or press Alt-I]
put some general information here, plus a floating slider with more specific info:
+++^10em^[view details...|click for details]
put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
===
===
}}}
+++^[get info...=I|click for information or press Alt-I]
put some general information here, plus a floating slider with more specific info:
+++^10em^[view details...|click for details]
put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
===
===
===
!!!!!Installation
<<<
import (or copy/paste) the following tiddlers into your document:
''NestedSlidersPlugin'' (tagged with <<tag systemConfig>>)
<<<
!!!!!Revision History
<<<
''2006.07.28 - 2.0.0'' added custom class syntax around label/tip/key syntax: {{{{{classname{[label=key|tip]}}}}}}
''2006.07.25 - 1.9.3'' when parsing slider, save default open/closed state in button element, then in onClickNestedSlider(), if slider state matches saved default, instead of saving cookie, delete it.  Significantly reduces the 'cookie overhead' when default slider states are used.
''2006.06.29 - 1.9.2'' in onClickNestedSlider(), when setting focus to first control, skip over type="hidden"
''2006.06.22 - 1.9.1'' added panel.defaultPanelWidth to save requested panel width, even after resizing has changed the style value
''2006.05.11 - 1.9.0'' added optional '^width^' syntax for floating sliders and '=key' syntax for setting an access key on a slider label
''2006.05.09 - 1.8.0'' in onClickNestedSlider(), when showing panel, set focus to first child input/textarea/select element
''2006.04.24 - 1.7.8'' in adjustSliderPos(), if floating panel is contained inside another floating panel, subtract offset of containing panel to find correct position
''2006.02.16 - 1.7.7'' corrected deferred rendering to account for use-case where show/hide state is tracked in a cookie
''2006.02.15 - 1.7.6'' in adjustSliderPos(), ensure that floating panel is positioned completely within the browser window (i.e., does not go beyond the right edge of the browser window)
''2006.02.04 - 1.7.5'' add 'var' to unintended global variable declarations to avoid FireFox 1.5.0.1 crash bug when assigning to globals
''2006.01.18 - 1.7.4'' only define adjustSliderPos() function if it has not already been provided by another plugin.  This lets other plugins 'hijack' the function even when they are loaded first.
''2006.01.16 - 1.7.3'' added adjustSliderPos(place,btn,panel,panelClass) function to permit specialized logic for placement of floating panels.  While it provides improved placement for many uses of floating panels, it exhibits a relative offset positioning error when used within *nested* floating panels.  Short-term workaround is to only adjust the position for 'top-level' floaters.
''2006.01.16 - 1.7.2'' added button property to slider panel elements so that slider panel can tell which button it belongs to.  Also, re-activated and corrected animation handling so that nested sliders aren't clipped by hijacking Slider.prototype.stop so that "overflow:hidden" can be reset to "overflow:visible" after animation ends
''2006.01.14 - 1.7.1'' added optional "^" syntax for floating panels.  Defines new CSS class, ".floatingPanel", as an alternative for standard in-line ".sliderPanel" styles.
''2006.01.14 - 1.7.0'' added optional "*" syntax for rollover handling to show/hide slider without requiring a click (Based on a suggestion by tw4efl)
''2006.01.03 - 1.6.2'' When using optional "!" heading style, instead of creating a clickable "Hn" element, create an "A" element inside the "Hn" element.  (allows click-through in SlideShowPlugin, which captures nearly all click events, except for hyperlinks)
''2005.12.15 - 1.6.1'' added optional "..." syntax to invoke deferred ('lazy') rendering for initially hidden sliders
removed checkbox option for 'global' application of lazy sliders
''2005.11.25 - 1.6.0'' added optional handling for 'lazy sliders' (deferred rendering for initially hidden sliders)
''2005.11.21 - 1.5.1'' revised regular expressions: if present, a single newline //preceding// and/or //following// a slider definition will be suppressed so start/end syntax can be placed on separate lines in the tiddler 'source' for improved readability.  Similarly, any whitespace (newlines, tabs, spaces, etc.) trailing the 'start slider' syntax or preceding the 'end slider' syntax is also suppressed.
''2005.11.20 - 1.5.0'' added (cookiename) syntax for optional tracking and restoring of slider open/close state
''2005.11.07 - 1.3.0'' removed alternative syntax {{{(((}}} and {{{)))}}} (so they can be used by other
formatting extensions) and simplified/improved regular expressions to trim multiple excess newlines
''2005.11.05 - 1.2.1'' changed name to NestedSlidersPlugin
more documentation
''2005.11.04 - 1.2.0'' added alternative character-mode syntax {{{(((}}} and {{{)))}}}
tweaked "eat newlines" logic for line-mode {{{+++}}} and {{{===}}} syntax
''2005.11.03 - 1.1.1'' fixed toggling of default tooltips ("more..." and "less...") when a non-default button label is used
''2005.11.03 - 1.1.0'' changed delimiter syntax from {{{(((}}} and {{{)))}}} to {{{+++}}} and {{{===}}}
changed name to EasySlidersPlugin
''2005.11.03 - 1.0.0'' initial public release
<<<
!!!!!Credits
<<<
This feature was implemented by EricShulman from [[ELS Design Studios|http://www.elsdesign.com]] with initial research and suggestions from RodneyGomes, GeoffSlocock, and PaulPetterson.
<<<
!!!!!Code
***/
//{{{
// Plugin version stamp for TiddlyWiki's extension registry.
// NOTE(review): JavaScript Date months are 0-based, so new Date(2006,7,28)
// is 28 Aug 2006, while the revision history above says 2006.07.28 --
// left unchanged because the other plugin version stamps in this file
// follow the same (1-based-looking) convention.
version.extensions.nestedSliders = {major: 2, minor: 0, revision: 0, date: new Date(2006,7,28)};
//}}}

//{{{
// Debug options for deferred ('lazy') slider rendering: initialize each
// checkbox option to false only when the user has not already set it.
if (config.options.chkDebugLazySliderDefer==undefined)
	config.options.chkDebugLazySliderDefer=false;
if (config.options.chkDebugLazySliderRender==undefined)
	config.options.chkDebugLazySliderRender=false;

// Install the default CSS for the '.floatingPanel' class used by floating sliders.
setStylesheet(
	".floatingPanel { position:absolute; z-index:10; padding:0.5em; margin:0em; "
	+"background-color:#eee; color:#000; border:1px solid #000; text-align:left; }",
	"floatingPanelStylesheet");
//}}}

//{{{
config.formatters.push( {
name: "nestedSliders",
match: "\\n?\\+{3}",
terminator: "\\s*\\={3}\\n?",
lookahead: "\\n?\\+{3}(\\+)?(\$$[^\$$]*\\))?(\\!*)?(\\^(?:[^\\^\\*\$\\>]*\\^)?)?(\\*)?(?:\\{\\{([\\w]+[\\s\\w]*)\\{)?(\\[[^\$]*\\])?(?:\\}{3})?(\\>)?(\\.\\.\\.)?\\s*",
handler: function(w)
{

{
// location for rendering button and panel
var place=w.output;

// default to closed, no cookie, no accesskey
var show="none"; var title=">"; var tooltip="show"; var cookie=""; var key="";

// extra "+", default to open
{ show="block"; title="<"; tooltip="hide"; }

// cookie, use saved open/closed state